[Binary artifact: POSIX ustar tar archive. Members: var/home/core/zuul-output/ (directory), var/home/core/zuul-output/logs/ (directory), var/home/core/zuul-output/logs/kubelet.log.gz (gzip-compressed kubelet log captured from a Zuul CI build). The gzip-compressed log contents are binary and not recoverable as text.]
Faш1(bc8$&F 4U)lHsHr;`8자s Y1&TEVų?=஡$ fE;C[qK0ᚪVLKݬK]b A(&f3 GJ  @16&\|PJzQK[66Y A)evuv5M.s]M6aE[)ٟjH2dF( ^1TE3̰a"1#DB"͢W=5/s%m)mT;&A<ܧ:)2ں{-9<3 o"3Fk|la[# U.LElJjn 7Lm9Lhh_p cAAXa"0]ڂ:x5"mO1 PaPLLhJ 2$Q@$%#@fulYL<ɧ&D4zV*$+7!38aD*fؔ $ %u7I>#" L+ , B XEI)'Q$r![Pj/)&e!h& >xb5 "JAIҘB*O:f5q!zID@[s #B%\Vzjǚtg竑 ^z5ePq@Ӽݨ$ LVG`5gNbb}P@?s8@gFXלvLHRI^}6bT@+X>F\P6"N =Ҥ煮zYZQ.M5aFcXwRVcph#+T Oj#W?Ѹ73֟.x[$Tm>aMTx>Ol>xJp L$9-6w^}1>Ow`ln'Dt!84@@%&51:)Z"6|(S#i9hycXskA1iU8>_]m pd󉆿Ei /W~*?F sg 0r0KYC0y?Sx85}^\24$+ pʁJLݭ_)) #mIU j6d&8poyok*F#d?̶%bU!;bΧe &hibRWw%h]0\ϣAOtpX|zm <]#Hɝ]-C-* <\'/BCh$y!^]ɞ PuTI' H!'* K7\6hDd徑iVdrkW&ڕ.|0rEZLp|p6F%q˧;S`AB BG[' L\BVw2A}G:gD?]jpq2B@ʺr`@VƟ6l ƺXj7t/l<ݼjzb*ޏyrR0="6ԟNbkT8&w#`F.Ņd}Z`klw ^R,n%Kzs"|gu]LAX]\hҵkV{֚3ٕvHWnglwK2ݓlMR^ pܼgo@MzტKF*EAU-M~Cƺn@- hѮTpUhv ੕~eH+qdJ"z+) ^1%{MJ9iڙ #N-百Kve (;AZ͙{2{NG!gLzqWf3\yTTXH36U Wunaa`UiU 镙 B$]Y!4^ sT'V+{U3{Wz%i`07sHF@}H iu7YٝlS3OY"$@X_~,ɮHtzipȧ>Y9Oaw֙QLyAGf&?]q .k W&*Oit6X4Ø J硶ԓ`?Cet7͡wFww[?3U|0I'){B/@W Pރet %)W^)'Mb}+*cd+V !ϖx~7E<) )~0ck`L3!r'zYު]8+qY J[?O"׭j_5R|@`i֧yyk WCf>X>fdz>GHrys]x GyXp\;/2*_>CGP;K%~އs#nnh.9Q7ߴhM{:T$[Mť1l=/i_'} '],񱌿@qS=$Z֊BNM}dYBW@ J% &<O[z \bypmoDMbWV.j hg9t Sϛ]LguŽ>.BCB.[H %z5,N}< S;@ BuY(y>pb\W inl3I6Z28Agp_sy`١B|9BHZj J89t0έm(qU}_~à=Z_#\cG٧ḃ,: qy<ݠUZHzNQ*\ν JN{ WDV=y.D(BHEN\z,u9C{}yZ6%7֦ 1($;vfZ-Qx{lg20?]~a82ˡ5EJ{yɂa9k\$8Ʀ7j7ô1n@i;oJFO<.j?RgF6u'+IT]f0ubX.%ų,@..IERIFRԩC]+\㩀Tlk`ygs# /2’2Ue|,Q?Dn>Np17-t﷥7 \hq:nGKg>i 6f惇"-ZYmm3:PChUHہd TiyO z2PdcQ=\xzT3Q&o:1X9SKα!]wd%wHLMZ9>X빜&y0Ls&cQ[3tPbNׄGs>k p>ZZi+ ~Xi #G"~(Aoҽ 0ل#_ap^־>2SwݵIa jI;gHRgew4ACJ3ӁH1VT'jV(x;vQE?O" jgNa(bxBN~ȕ>Z˅\ !sZ6j_>.Fg;|_ԣ)*^Y>Y;Ƙ [;z+Z?yCņ N!Z㧄AxEHpWգE XB.O$v.] ~}Q拌YR)+`э*끾FFceY֯;z=xz; CQscƴD ga8eA'Ӊ tGaS˜ n "2_8G$k"?LRf#/6O!- K4>4 /Q(&M&rd4͜a+]I7CUhqDi;z-v@M;5ݫo5SWOҲA?7$~^f&'^;!g@H @u/ 4_d߱+55W$@ R_bۏ Fh,Ocr̓'>T0SJ)?D"$3"+Va` ͱIӃMaJ0Z}J 5G[6~Āz n(3(+UydDftIqOؓlFr ^?W?F4蛗dnB8J#O0 A@YYfҹuO|5*L 2zDR4}jz\/BM'*˪[$y  kRk>A}U1&95*%g9tHNbNRNp-ъri V!v"Rz:m)O+l)Ǒ%E},MS `ԋBul[ZaαKZhS^1 dEeX޶/wx=Ec?h(KgG.M|z=&#hb߄aߢ'7yۛgc#BV_7\iW66>Jj0m4l>޿UKz,}~ NO~07M謹3Z0|hf[c[0!&mopo:\pR!x17տb'9R ~B8=]#-'hE35q:W3u}8FUaa}Cc?H soW/-R7~1ұ$Т;}@KDӨft\eLn]깢əhmG=sQ6?\] &2}>Xw)Nq|fxv-(}=Wo\oם`r;Ze/\.>x),T--Gl^8hP\9Va6[{jXr h08(R*>  9{!izT:Bm,ZgGnkWƸlS{9hKfgs<ֳ)$mw;Bw %{xA(GΛ."i!}}:M4s`T_Sg1$d,g9쬄8P>dh, -`A2u69GLkרr%j5ZANJ\VC ͢ưS7' 8&ϧPW\ULp[z>{!feh3"*={S+<$"WbO/9H%%wM*XqLbG&C]#S\{eՇd`ˉQkUK(iೊM6ǒR>ImĀ rDC]:q`4UjKʢN_,9&T.7hjMsUBE=?W<Ďm92́I`c cy ۣ̑vKeQ[&Z(U (so{ˋ+OkEpi[M窒snj,R'7,Of" *lj X c0$Hbf8ZYaX L$1X"mP\L@)B 0 D1 Dfa@ C: +,_SHO͙ԻUE$[jb*k*US {qWGV5&s5Ð7Qۙ7˷j`4__4BP&qE֧Ε7S0uq2B *50q ϳ;=L`TJmYPV8tUUZ9rMNAWp17BbN9.nSBmiUb6aJZf#Dn _9 U$]qX9ŗъ*;>[ !UvX_m00GݷĎHBIIN|*coS! i 7|b⴬+W^ͦrԾ-ՒLOY&|vE:wxzuy Tb;gwVgoU#tV|-kf'Tޫ{ 2M%~8m.r L)I,[ŹXhC =`:%BkZh쏺Ulҧdjİ~-9̗nrP'nȂMr .tvt򡧇zQz!_~XdT +֮O}^|(aL7=N:\Ki)O7Wh"@ըGXkkIe!J۩~j|n*נ#(;sGʖ.8ugfmƯ5})E Aԇw_lÐ `9iLo%唍r8Dw:bXK)A'g~ճ(δ,GY͹*? TZtslLztKt,Q00Jmk8RA*C@$Q I@PkydGR&5gDG34$8бfvQDD"wt ~i=vg4 TF>>V!N\tXf?LØ2%C}}mW?;f #GDQpC#Ih H!$< D``]( IibqyŞ + 09P%Yo !ۈQ<}/;VkQ hfeUL KU1`@7€۾y~/oDFY4}Ě҄W)L?凖&45DdkҋF op8 /(  ȗy8쭬L6ܫ79'9˩ ϊ(ܑN.>`r3z'f3XE +|43Nou82?;Heכ?\+f=׻OitL3V.Ek{YI2UgLջu/Qkp4d'?9i#mqD1M^4Ш5 *Nk&OQF$NhEn XwyOӖE屚hha~dF2߷3jE<Ě9lܚ 0 @E|J'$7wzak2{#neΞrm x0zN }C.=~ysvm,E EP&razJ)!~-SSŠ3kڬ-~'ɇaNxBC@$/v42ϖg*ݖ狪4utԱ}+@"æ*Wjީ665^ʹٰOQaN]OC ~ݢu%%@ <%])P{kߩZho#8Z9DJ*{>NYGI2̖렰T`&*rrhXij!O4#cᇑ7RCU>cUK)50J)NMKZv@8Z+ EyYBOЫ ]O  nK]ޣ^JH㟈-[h!63.uMN ;$:5'9%{8F  dDGGvlغhk0i5MěwX޺uD0Btŕ89;BЃ;Q,4j}%z 〵?Vy f[G|GYʖ``%؞<$D[4Ҕ!RgS ѷC>W! 
fjpkmH _JÀQťHP'/9K2V"۪$7o%CԨ;󙙝1V>(Hwm yF܊j\I)[ͅۊ.J7hiW.v0bմu!Fצ2ޭR jR.ۦ.{0*ԯërij9T /ގo}Zuދ5]JT- *)Zkj%*^%KYrZC7hV>p^4Z!]+s=TV#F@eHP(PwY%DgL+ aՃ`|Xu1LJ]W 8VL+ M$ }v U[y}d Ѭ=.cdK]"@~[_`#uerݞsQkǕd6Uɣ@qub4Fi¨|M~]##ɗGDL `ق 4֫^̢S؝ Q'k0 # Hb8r;_"U֫W[Y՜@4>ɱY#Kl0s,+EVIGDJ ƾE,~$΃)>ؕlAف΃}C '0ַ@o6KL;-4C[z[0.vx^"ۣ/ޏW\'Ez^Bgޤx BCx!rnöJS0$t"΢(2aw˪&dy<|-+*G]:;]qM_ v X$Pbk8$ رPH K1IJ$eAdJ}yll0kg #JK#hv4@8HM"2 a (%z\ rQf$٧b*&G7$e*GS]éOԯr">#O]~73_v5v_kƳ7[j0*[TIXyT+Da$?frx8 f"VQ3։OX15A%.BIq%1I9%(#VՒ ;`OXHE!bDŽ0,( S2"D!+pı B|Ű}A 9>ksjFc`֙SKdfȫU+?860;c9P042c ʘ&ޛ?&ex\_=<0M>X_z3tf=#_A@/xt6FɗO}LgGy8Oඹy үw ,I4U6nL`PeBUC`Dsw '"`'1xEl%|1]XlL0TM0ZU䧇;gFMlfw G{2"L B}x 9gR ሑ42 -8Kΐ`Cl0HYb&!3,e+0Y3W:SQXKsϊg 16*ŋ ׻^ J]o$rbw-yI2)#GYb3!|K6NC됳#v!obCJ\]-y\H< ;<[\4dgI'i ?y;n˷I\ ץhs4r϶]]o @/AF_2y)M%-Gzլɬbj,)|27}̻l|yyܙ;3=؟xm$<+Yʼs(~bXy =@y]е?w|)+۟Z_dמ|˹ǒgipwOn2 9@>WRԿ"vhF,O?^"[D@5|Qɰ0y^L:!/ն},)/.ۚ `ٗYP[b> c7~‘-bZg /ĸmdvӧ,JL h`!ہ>$I.W#ޓ'0v'XŪ+70&i*2t !3,bB+ "AIN=c!RSD&V aX2hrx} g҆F\;}zZD*#/G(=g-BHiSpó )BsCH MW/҅XXAZ@yapGB83t,ނ*b"Č !EX*]a䕬԰԰p݊%"|1C$J@Wy{Éyx13t #}t3Za.]1˹·9̲b;#JCF`lXysa.򭔍8fa0T"C"PqYO"֔6]ϲBWR*ҔDrG6GR*R )qb}R##Z\UՔXu5I+LeIYK$X`F2e""lKagD /!Jň2x݈݉C20K0]-j"I}+Ɲ4 n Pfa\f]jhmPuQZ4.L(x폓4P[|hV&T uJ?jU*sˎ-3c7nv}yQUr)ByO5n_8ZeUNů+}pH2S>LK${ x悢ʋD*!.E([*D&'ta XsZsIȁT(N) yk@u 4q2_Wi% &C> ySF=^&CJe*:usF2Tٕ}6rsUwa3W?8rӊwh/*[aMgC~!wjKݎ\ ^ մ2Z,So6ϯpjPT 5zu ƃ1SBV@;f7-|7ӱR2sm …wC f9Ӎ}KXPűo8&lW>3CE?4Ŋ0 gr[3hy!j*B𷏟sݎ1$kur.AO4[zW@.VO'BކoM<9]d5B^[Bg ސP$]+4^V9FCZRrp|P|kkcgԷ 2TXv` v*a{gtm)цBJT{n/k`&ZP ajX:rfǬTV3vnDJ҂BG^GF#tVg\U$J V%O 8B_Y4A)+15̢Xbcd\*-J:"B'gVjn0F2Ox_i!J N#OR3RryaA]l5GDcGPUSE,nҸR2! Rp`i%-VE# S!``B /HnH@ jbmzֻŽͺ&>XHJG8vp " DI`݉#,9LMu4SsH99\A R?_ꗖC ,7!s[ Qq?m^ˑ3^ןd|-c|lfwl4 Cyml>v$K]a  lӀE0$ E E`UY(|1'z{Vc ~1P3{ڞwmI_!eq !s ~AȴGHJ~CJ!pzLAENtc}3=/5!Qq=%*<ާcLQ!ܤR0TpA:;sɁZ;1(E)C V+ՉNc#T]zmERh&u9@+# 7Bk0`0*qw{16p>_&}\ı(fٸ`>nɌ =sN39{Iz`y\=?iWK7~ đO/^ccO*.xO[glGף{nnh {pu Q I0S~ Rd35 J]*خwR a}F ^R ޠJ1)ElܠT0 _Tߞŭg۳/?L )#/W򮑛c`_<˵rD@{Kl BA}VX xu+p8x.U&` k)8E  xFj, *!8}4{s9jU8t-$\\T2''1,] YJyL$@n]h!EAsS=5_mZIxoYziq,WB@3xU_,Kj,cu&&Ң#* 8oݻ7#c*i\H:f͘C)&ڌ <1hHocLqxu#C6 bPI0K'b@0aVx,ZkZC11u\ԹTU1+:]Ǜ2]+ӕ5uDCvyL1' TL>d. Zsmqi (/.,!9 lV˪pt$?a*߹{$ (=Oɦo'J aD"BNSM\Cueq_v+Z?*i{!P?}z? vEF11Q'pBGdBG C6#;:: ʼnBi'Obh1^:~MMoS('1>ilW;>V)>n<Sl?.'+/1ShPZRE Â#sOJUy$(s jƖao0Vh֔De\km+R҂_kiXEanpo=4HO g^s/=qW0L"K\5ޒDޒZzc <U'I4;&mX.{ uXot嚨)+ ?ZyTrϟG}fDh_af}jgws7s|e~m̓_OV7!;/Di87aZ}tvHUEh0+ġ-?S`3`+*s~ƙk\ (sˣ!_SDKA1vT81)%tsU"jgtbL}:kĬ/%D)ÚZ#5IK#'-(Kim >0`^A04KsSBNY(ʚ;P*yh& e,qH` \xVVp@\, \.\C \)a! dDip[St8VoDTfr;ע {hrkfONtt6N̽,Y[*"(EEVQ_Oj҄ak+6ZΟ,*]5E{m˃ Ƕ}4ik3K"h UtwzNjBZD3\HSVKԠR0Dp1- d c {7BB]P"jPm7 2Z UI;CjaB US) ..+Rs%3&WS ClF/ 0{VoyvmU;Z 'MG"X_y8h$fJ{kpU&|AA*U˗.uikEڨ L.VwfP8^@UU*`vz PRUBs6nx&A<5*DeUMK1>-2 I;Ub|)M#!T%8|mF™\*XDpc{Zk@/Φ1-3G{مl&Ӕg]izO-5lZ[mhUZ#D%\_FGq~`n,{k,0XB)j`6i:: ,RnRIȋ?*-JGÐ(6nGfVĽ} /!jڝ>R݂H%H  e1n @R4Us, V;ZpB((Mz |Ry+ `H!#hf$tcFhqdR f9?TVyI[x,`zP, #;yE<5cܰ<=^MZR4a3cxڡp_%;K( $%dl>e5cYiHA)װ=]FDiA\(AƦ{Gfz˘AՕh\=8N8Bx'Cwq^bM5nȄK|=ӽts&N ֍h~ ތ׈#0%m9;Զ{0l 5@ܨ/CivQ= lD7OnJ0hȎ`pf._ qT©˨ Bb7@Ey 9 ̹Ych_uPہ+JH65[)r^Ѥ5[>DQuklmH3"x xD1FJy#:c4n=b@nńj6$䙋hLq1#CVJP GtJh<&1Mڐg.E2Uɀi݊w fͅiZ0@2*L0CϓC*-MT<Z;և|g8Fl)85'iWpj*{ޢ&_*"6Kk4+$Yl!cdh|8K]*T7V|1FdnwL%G~jzFhI!C9V^ 5ZY-YmPyVMviQ1-'ӽQEjZ4 mƜJLE2%N#DYC&Qwʧ QnNgj shv |,^;c&D39c{apljᣅTr+ksdNB3SL뀀[5~EV8pr u;X\lj`X̿ŧ٧7z}Zel3Z^%ݫ DW;x[ `j)4t>Ttg*.0!tט[ݏ?{-OcA+]FfdXNH"FqE!H ҈Y(ӈ%QAFdHʜ7JӊH 2{UBa. 
" D(%C8#IBe,tɬPǚA@*VM!z̈́ Emʛ`LI *V$rbpR2=W)=h3%7\{GR1L"@)@v2Jrk* &X?}WC2~x#F@ RRj9v`$R{`y񰎕TA/UQE!P@dE0" 0O4X G{E`nA&} w,\\'`bEZ@ly"9L+ (b %%F@ [TDZ$aRLlꍋ5d b)?i _C[Ṱ6|?7pNvޯ>)]O6N鉿8X?ݼ YOo/*l^D"_wNn֞u +;rMw2_8NގJLʃr)sAC4(ꙕ<:! {n */Zňpq(%:TQv-Rw)8EE((k'{1f):0:Cs@*p&8stD AGk ( Tc("MCpLYSLXJ.r86'tO4=?Ix;Wwy|2g#ؓ|< ?pߧ uILo^طe/JA4|s@?~9&f}:_ J8Zr6]eUwzU*{Zaį$F ]f} jJqC-;QKDp>Ԧ"'Z%AvPn*hDHDFR"(VI$P^-vHfHvyMDT)JS_KWZ"- hHӡk0"X!]D豂Ԇć h X|bD؁(ks#+bjƱJߌb>LV C)fps5C\=d>Z ]\?2*n)p8eypilM۔nY]^VշK??o} &nS9o)ū^_8YϦgg|@OH}AﰘoN~q1uImزF:b4zZJ>),EUlJW+fb*eMm @u*s~,j8~$8vcgEV4qZ'eTBE!##0 *rZ=' u ʶQ8織ӞPRmfu)7F|ʉ%:pŹ-?]NRzQw4u?dec4VShی*Dqig_zLiFޔiAؔ9lI-9Zϵt[[Ɂ8x|`4s㸁JIRλ u !$Y),ྤ{vF^g0dM^&UǞDC +8ЂSu\ m&Sm$^W,\O79s.:XPR %FAmGAhe&&R+A2d9ȾH-[Vo/|xJXƫU[6u{a2{ bYq (Ӣ@NB.8,4\"bz%QZ :du*}2?dIlGeBQ@EƝTÖI%mg̰f6K"e+Xycc@+꤆## IuesltԖaܾK=>>(RF{b C9>5_\*0Ni3p\s7 Yɢ.}I)d-D|svj_Lۛ)} }{W{_)<hasM{m> 'TtIBO~M5znXQ{5'fpW C N﫫E*v֣Dj=d!T2[}|KRqE ;Q&d.lA&>Q@Ö/b;ȸ+f \ DT#؅6T"0MiѦsBΪ`0M78S X 8JE^BF*UK('eK<" YYԌIB(ŝp2 4` zc Qjъ1VyJQ)t_xOY$FLBV6L6Hip1iqGDKTzG i>VĄ[gfgW6ÂuIb:Ua !Sy+*_Zx)")"  y( QTVZ!DPDB7<^!"kw'5.~u68D,{v<_sK r(h_đ$G\ [n<+/'#!5MTh& *{GgTPPyף$&$*1`j,(I9-Hm >Z6ߏ}pS7 YF.)A r%6' Fn?;-B y"LJ: nnv*94N>-N_\=tk%yy1`y7ͻM'Ew?&Ho"j.P5?sOoP5&BߏF׷g)_󞑝{R p{nњcxL#͍:)~_%H[i_P`LKةA:fP һwk!" 8[!xYSkbtL2C$ihF Kim6`b1yBkmL1o~ҘoޓSqn ʡѠ;2RYdaqi1pSM{"BܴgrtZv $G u.v m;kBL[[ZFhbB?BtKM3yxL1JK]!`Io.P nG:UaɺЉBCIF<̴q(cj`:`UIZ ˔<$  \hCTK2f',K;R2ęDw<8'&8`1 21#%Zv,`{tn .//%z$ ga E{n!ˢ̊ RV4˳b7qਊ"~-[.LCʝ.j K*/*Px f9 xR郕'FJ$SnS_ɊtJɏ0Nm\ 7sL/fD5bBk̳Gh\Ȧ<}kW>B:o8b OYC%9݃c3R@"ogq^nn; ^ͻ!V9ߌo\ǻLK•+H{3S1{)$llvKhe\9{3v4m7^(,MXUTqȒ5a}M^&-䧺?[b+dcMF;<Wq^s&`,Ȼ'.ζ>y1~XpYE!jqPTFن߱/Xw|M-QAr+w+Wxa:sNfs5ήo⇥e2t͐y{\ _sݺdxG` DҊ'+csC& qC;'E?ym%GR$=X/ xƣXh[]>F椾)`do9f5H8[ðkmıJ7l4ħ:`J<α<{`OGL%l(́(MKnmv,A%m, )ހ2r#N;.w`u@ x*`v:Npy"C+Dl, ލK(9z0vtWvc?\ÛKسrx3-SǠ>/ q$JhڃO<F>ǨK'3!l*:VcG7 ۇq[™?|9-Im\yhd;CkWA lHv'?.;:$;9au]L#$|iNf&]n wrvGga@iLvs˥G1|K0zer9E;i{-cHgpzwH(@g(NsP(ЉV ON1Pp@u3;&r/FQn>2W$X`XiyÕ˵f _%K D 46ϴ7oi8S\. +lkڐd$\ .[ p39|l^N =hw>A[x9Ovr>vWg,VX6.q^ko2C}jOB |[o QUO(@ohɍm|ԁ$ֽ+œv|oiq7 (T/0g ( Vbt s,oƨbH:_wAc1n8]F9Vp чݏl:ce$do^kvշta0赱:PGBw6@]TD7{{"py#]JT+5#Q,** 3TM0εgʲR!0, m]ͥ(뜳|̵nVl- FRA؎Lݻ'nĞٸI ѓ 7B0]ga5o9Zh}3.F/ذٻ7 n.dFR0<7&p 2W}$z<Ԯ-\ydCM3 Fa+ԏH=?>KEDsCCa.515o!}ϬN?nNEu:~9;_,gOYOfkmp?."7䯘)*sK.ӫQB +0?ޡE af:!TQjYqAu2C=!j;<-%zMT1sC_y0TWǛ'P|qKQc=Cـ,|Z&KrfdRu !0C\cr? 
#@dI )3MuC-9oH{<X3N&Mt6zEz:Ah| ŚYpj@^W^a h͍٤FGOb"W!ך.kMl- %NYT]>;iJ$ Sq"B1Uu*W1j;*Oz2N(On81TG1t}ً7f/3 PL?Kkbꂖ^])ɒȣJd 1,RD`]pgT4<Ԇ D\*O q6qSbVQ8Tu&iM &OHÄ$hĬ5/SVW ;ù.GD>*tvrR8-]F^?HHeqQTtqi|}T (׬YWQER;p ;y2g9ރ6 fCtn҉|ͣ_ڂy];];^Hy$̒cdaxPa]3-qZso~=!&dU7 f6:;ZbhkOyWT͒ǟnlVUw#)ѺMFی?Xi)|er*JE]Kؐ">;]Hh"&beL*1.]ބ(P)U0DaUTdžKFeyv@=PDF|W :rOGӔ'4 9UP(;]{6x o;8a/=a#*_f{B䉢$h]3xIzn5y$'IR:a^B^uQת=aVZ ž4Fg3ۇ}:Ot+&' ؟]0mL;Kl7LԨD<&psBi%QSZ Dl,JP4TbhBƎN_wnP 煦JFf3}̫1e%s-_q -%40iݶ,hGr7n#Kv1q/;ddega.wK a$S*f`2EV}ߩSέa`"oBh[fseh X}Hu)8M0.8GFƋx+lf7`_z"`9()kּ1t(UgI)Q9Jnx0 ~ f 齀J 5hťC󬥞qVH N,A^| NǰӸRZB_ g,8b8^TG'cwYRWU!6#Q^ϫ x 7 $L _mԸ%uH EN&v܂aDlL%N_g6DžE6)Hzc -@x=g氺ĕpaȘ8<3euKFЄkDZ V?Sv_Y$NU^uԙS .d : G=";v<35:vjFcQaV!'$B4]r#U-'i`rQZ+4D Nr DcؠYRSa *լ d_?̳I diJ;Yݧ$$wcF_.ޟ+\=}Մ{r/{ځ̰N_ϫ"ud=hhu:FPƠ!W"SzNypmSz̀ҵDe!he>V&f<s7`~zznSq_nݣqީ<֍nbəVK(<|6,fBk5@àt7w!sc(qU{͎MYK=`-ưC\{4 EL-g2ӌ"tØR'#4BR0<iRd*uyl)\k%W}XQ*7.s;>\%j"w?F5n- bAClTo_=dSX-!2/>hkD!Jh >VfDc 9P.牫[< ڮ4Oi Yq6ٚrt}餽Zwʹ?ʉ1&j|vJaʀ m/a<x>` yL|H\_)VËhBZml⋷fF֑ȇ H%v'[ǃپlG{'϶%Ru2/S8Whjhޖ{so<}|_f:%6zkeL>?W35b7% /VVq C[>0K+oqHsjhCAgx6Joz?r {GHpyzC ?ͣzSkϣ{D6z}>A/ $q;}!<}9<]X&5_؏~y_ pX&K}zxAx!q:P׳ &m|~|iig"oY 6Ӵ6yA›,(HRW]*i~D ɮ9{WKX"&`0.%@`E—IѺ`{z$@ )z'!^=yAgP0aBes%C8pՀ%)#}PHF{MMRQMnIa::U iN_YeG|wx~Ou)Dd$=0St ˥Fbp<ޓ%Vd)zx!?>ڇ(aL;h&g"шSG뗝s( ].ɻxs9X?J^:c&U9.7cA> ]cȋmmf@-؝cbEV4Fi:0JNH,(3'ߌN{v No%OJSԌĵ*#\ȘLvIlաQELTv1g27oQ؈#5G\jZ&4_3{mGɆlL1K۵ddtݔn2pYFAdw7^X @*MI?$0$ _7=7I|$O$s㬀 EL؄Df 6*)F؈FP"`{28NDNJJ-yRM̀(jbihE^/NҞWR$aZ3)*]ӆ߅ ARysг[q~J\nDwN* *hʥ9=S9 t Ctx.&esr)SSS(i5N4#]S'S˖cY⌋?KS*hB*f,-NS;?m)'s45~dt=|WHP\¹kbXWjXrV.}z?|ÅQ= nn?O NNs'\ZLHa6G*ωENWY~ 4/Rh,vK W0~0 TX`ioY&CW'6V+5h8YU$LA(3, s5/6IvWTE?>r(/:W_y\} [O؛Eu՛bSwbB&_ᅱ~t}ulʻZbUQoam"-RGJBB{G@B2mGs#–D@kY5~^>F`$_/BZi'2]Rj Q~PaDXZx e=+Fwa$ջ&U*ܺ[+LSgYS '|MQ WXzi=Yjg9sevt5%n>0t5m{D/pZ7o۫nev ,G7~!x*abɱ3V0}9wﮋ_?mVw",گ d]wH^U?_h"po7U{`}7`Uŧ[xGx< f囫YX7Jw׋{P;~ao;s]7UMßKxࡱU ;o>`Jw5?6#)*8@Ujx$`HQ"ؗ$؟}Lp;b>jA7u5Z:yԴ+t\&vźDܷ/FA ~ܭglu,砶ʭ*խͪu~z%1s~m͖7j?5ſ-6f߽UˑBy5Xo\]q* Fϟs;{܎~Ba;)xFz'~q$E4DꅉC=n:dn4on1iP$nbHȅhL15xK&v Š~v紭8Un, [ p zz^h7 l@ mheeXV ٟv !.A2U?n^ڍAhP N7h|Sݟv OImbHȅhL L-{ݔƃ1h1~- jڣ-8鼩AB.\DdJ ?B[(MD'm[L ԛv /+ibHȅhL1:웗ڍk>A F1hA*5n1$E4H|Kw Š~v˙yֽiІ-\ȔČ.i2e~NW0[&jb\/FI .6i;:eL*mdJuè9SVÔu%p+F-"SʚT7QM>ʚT5QMPMFMemTڻ쳲@ZSYۨ&P-^eMIT֚f5aSUYYST6 Z>55IMDWY㦲T6 ?5v5'[Yce1B='?Awr$vЂwǭ΅wH*4w4zWkͥC8gB\ rIvu¸ԟ0Mhͧ{9~ywGY8)V+8n-)ԚP*j-4|x<|rb>! Qe8ŒKwgD^׃Cp4RufQ8RFa)D#͙v8I0TpR@h fQ;DH(8H=WLC%L*XԄ}j'T^ kQ9XW!Pb<pC ׄg K5K;0\arK:Ac#\U5!Ch!@+/=BK#sd[90h=G5&ԀP*0bެ&E"Lrgl8lqaη)!3)6gʓr4f׵ţ^;tn ^"7L Q\ls ^U\r)m&#^V$^1C?%'JL[<GtOMS)""pb/[Y'Ӆ>zq >y/}WNL.y6iw-qbB)n8VWoy(^Tb#yxh aR_yj4Uw1wsP p.e8B-=&Fz5҈y@K5Bʨum>$)8J'-mџ<:ތ@'ǿ3f!LG=BgV_o}*2r3A< `残*i@!^ 殐cw%pF,T:%᭐s|0m%Y Οu'񧼰˲\s:1?B3L;t(BTD6!rwŻGP0Ge0oi2휋؃FjZNԸAˍ<$\t65kC6{ ٹ5@ lqUp"YVkħE'I3|N2ǨJ} 1'J%J2pu-׳jKŗZ*!1Be)HAhH I+A qH@G Dq "-vcx|Y )gsJ?jor W2Yϧ?u%a%Rj%h>zŸVjb"" K$CsjHXw^lRm_lRWYma{㯦H1t3G룬MSH=_u( -TJCN{rB(4hhτYd:$%E.S*[ڽjck)7EeHPΔ`.@Մ4_TyDt}ͫ x}3b=Ps8}o;g,Ȉ5\Ed9'd̈́G>kD^3c=3ӍAl˹ƾ7 B$f:YmІHxDwlEhuƪKdJ?L i㽻 #[nfI%L._j!-Wt!&C0B .B4V*={7ga~nbxYķԩ8n60OMF[^=qӣgp LǴkzݿnLf5G4m0#~'du>܀)C+W`^bU)d y: (Rs-f!XFoRv9쁱e?)xhȰ/H'lFGۧ~gu*lsvQ6?'w'6W0ϡM͠n_2ۚs}pюfiyqPie ϧ۷sR>;qYhaR|.ؤ] }>1*Ϟ@?<٬3Orߟp8w={=OӛW7>yv:wf :9|ss~7N?{ $_~͎ϟ1r%n?d@zo^|y6oKqˆRw}1G?vS˳='2.T9}4OL?{W6俊?r߇vgI v45d1ݷeJ(SD3Ku]bU8-g$oˎ7g'O|vk<,<ҁ]}y}tvᑅnmh9]GwLn:2O kv`1X33Yu\@ˮG@K>qFGhWx 0|&7"0vkRN҅fdYeOų$ qxt'v=c~꒜/7Л4m:8L?;`?(pn8te\)\zL/ӉnzËyc/Skz9ߟ+{/t5 |DA2HAa9ߝp4:}?df7;~3 W &τrX^^/6hxdx6#7ҝ_ހ)#'~~eA.r\FlJ<+;hUtqOtYO80۷sv@y󍇔;H2Lb. 
pT8/MYfs~+=FY e?lze_rt\~h' $LN-Q*$%+OP+7Yrb'!he,S-2  " N0džp@r%hJy<_.sZM?FxF8gv!+-Vڹ;3i`l %ݘI#R':*k'esx5O ~t+BdMk%еW i!9`3?]U8MT-^De*GEwJd$ԐGn졟J,q0m}aH *H"rH┑2Š$"6o ;.J]NP]`U&XG׋"6DRwU+OT#J(g挥ւRȂ#bFuHӘGX9[,94 Y-8l] Q+}QAA)iǀdACF0ZmIvµ!S͡Hj(Nng)Fz1dd;kYA$dd;1| "Eo $TH6I婒kVST-~-#'exK+ GI, pA$zE OrV0K=3!Jh[JF k@G/ōůDzg $vJx!#F1WXa( %Z1zzX X]ѷb-~V2?7[5}"p a&4߈tlY'o:mbuLBD,O]3 ,=Ռ+<(+|s}#mǓ[kuV" 4!6'k!뭲=xD> IQWTp,Q&w4dp,RZgg; -L*7Jt9. m*jUsFg#^MkD)ɓcgU$O(duЀuZ>,FL8?3NS$2CI\3i0C$J&MYARv[\NBA&JƚV?wx<B\{"MlQ 霰jAhtՏ!+Y ON3Es^S0E^1hkqᝤh񽕐]2ׁlER[ J/G-%{-CzC*؎6rZl?{wG6&7F&c/ZDIB#;B(Vy z[2'Zj׬0Ks,MPOC d :" e5dmBYnR'lV;` )U[<9|sy%$ j{Z$⤸Va-N/B{(W %0AF"i dJ+ URBU`WD֗m- 29-TUJ1;HsR03%C`ͭp2Bp9W)\.Ʃ J\ pyU\diMYVPnTdlC[.i`+bV=Z3s ^eeƮ$>` J"}ʵuH2) ]P=Ɓ.s^a*)Q"G˄p:態Ae?y̋vhK y {:qQ@irƈs7<&,xvg'[pk {ͽ6q[gP:q C / 0yu xN:{N;N_乕/li7/y .d&p?p=NٳM?Kk(J@T܃HtDZR<ǘ9CuZMZ{&QH^ͭutC_u i' B>PlY{ iC׻Oܭˉ(٫ E}<L}WQ|L([3q)\\e ]zv.ԛL) v40jEk|maopRy'(9/_Lȸn)pn,(^3rFAMfлxK~@ݙqr4gÑ9͢ݗGw{/ ,t+o;@:]>LӅ~Hl Bz2!3Yu\@ˮG@K>qFGhWx 0|&7"0vkR/%H{nKEKsSKqYA_/ʰVR7\X+8Ϭq5!0%DvD8뽎CV\m4#Rh@X֏[^2ϗi%rq+R75bNc'FR3L( XaPS,US܈m`[\fU`c%9ڔ UT܄ǣGUz*FrPP$ڹ85ɂ$]UD+8 1'ER!1l(DXcwm!DGRG[ˑ*D%jPR̢tk{m@8Wյcǹkh +"!EC[v<* bN+=u 6Z){SN DC" mh\JOnLI.k j`$cE w~*W^I 4U*Wj5bu3uYQAyu_,ǘF[g*KFfHKe"損`#$Xm\ǰf!~];?^֭0zF t͟w< [鷐Dgڛ6WamV`ʱ+9ˏM]eS` s- I9vT "ć= Rig{z{=֟)Rk_5#36;d<P1PBء*H3! Ӹ=0-98kɩT#TAy3pM槺&bSe ZR#mYOHrxREg9omR+s] JJ6`#i»m\1(v5fft7~QSxHF5l[ڮfY3bW'(u3kia5[ m9ΨTBt]E &>vwטPP?9Y0r'B@;Djj#j̓Z(M;Hr$еXYzyDeZZѧ %?01.Ŭ 9ss|dUFUkS(+vV* RL90g PC56$.Xp̱hʠCآa5$~5k*"Ae^ u'8% &2:H*AD##ܨMyXI-S+N4MToaӧ%Fr)AҠ\ffJ2]GTR˂pjr˱$/c81: #ž@& \̂5VSR a=>jQXǥOL gH)xΈphDy_$̈ ׉w[u,-q].pb ;g.I3\*XDdB 4w!&3j dJPˌ;I` _o0ݬ> AGYn`CGxfOw"W1yټS#s0;{4DmN짬m<\"vEe:_=S F|2H0)N'#p>f:<%8` u$St3?J8O S4xAGSy!x%R'x4G=G,gR?ʭ kkJSgMiZFXc(97ѦT,TTͩ!)HS|6te2[+hj5m`J=.`J4bJ8O5ٔVbbKHNM LR| J( VBld\ny`@)l_|B+<<=,حMXFUKpwneHMKX&mbmuNF6*v {HsagڈAnxHm2z86F#f; 6blZ9L)[B%bٷ6[U(*imj{x}4MuH1gɌ)a s i"Km|,Kmp9긭#Bj2{aŔpK .Q@ S{:4}jw[Q/dܥVՌaM` J 7bӋ'Qmel` ؚշ0pb˜Z}c{4'66XORv[jN bZU Sv]`YK4Vi68mr_'vT4Q.6|DSǿb ×+f5 64Zt"X&VC Ǚ |UHM4hJr]nFZDyv:*6MrbZ=䊑wj$"cԢخs aum\5CgwVm䊈]<r"F 7ޟ]-R(Q5yuC`4ߑ\>  n}btntO-x{ IEz0K8y:LMussCI?unq5%G,]|H $xXWnt .yOUb7q~.&8ug<~ygÑ;ϝ>o~d>6V7Ngzoﲗ7p\ d:3n|dNJ&'w=ɂMS<_}1..=?۫0~ǰ:O>4?.uy>'/Cj$G51aaC7*p4~(y<};|Nrv&]|GyGOQU:sR keFưH>̤7 ,\ի}Bৗfiy..߽>O]x&.h7/eə 7YRz녶_Ξ)yŁNͲ:o־ ;;:}u@yt [}t(v(v(]Ê)NxA4h`H ܧVO87U9aGtr:p tt.|`8 `8M97kîrzdz*9u^ 3|%b1c4R"Of_ObM5U~eRD^!-Yigg6: |DbQ{^!rqdG&%< ޛE&3--?t-G߳y7unFoBJ?չ8-;OγvFgҤYRG eG/mIXW6OQ`{0&i&2nglڜk,Z}nj'EnҤ+uO`6%ڕԵ-!ͩYȣKFoHy8"kPI2JdG[¥IJB"cUETe+`xU&X * Ֆd(u6ƳL3 ftP~6n 0VmTnm(%*4v=ʉ|x`Rvӡ+`dܜb= ?UJE#d2Aڜ$`ԘҦL@rޮkSYUC@k2K۬6Tі/| AeWɦ[ A*Zp\>b *'"2keyMDւ.㯰3L`ʊC!\)* 0AÏ<dOJވ?:{zF>Dmo=B0ge wsPf Ąk<C?tF+|yʶfQҝxVx2,ׂ=|/ȅc7Ob$pq t{ZؼҶeF3`835zx0ρqRe0*% !dwkhxa4"aXS*h;ի1G[9;-2AK*s6vΗДΎ?*$nM5}U)T ,a8A^li* d03HKH@YMBF]460j*Iv,]Tƨ$"g/"ޯQcg; cx2>+yEZ/<G? 
uX&c0LKMF✖s6]ʵT~\nK,FðiӋ,ޔ5$!߹֑)ᅪ^nF FtQEœ6mjg#[h-*v㢄GV1S*m[Sδ[mBc[LEbaq Dh#X )^'mu3gv}Fl==$.ɬ /DN8D<'q$J#Ʒrݳ,*;̲$1Sg;>.\i= AA4<~i%n-m0?LʲHkqREi{0 z-ʑjc!-j'>!"g)_:U$ Tv}Su%'@*f?+FH00N1/^Ӯ YnQM(QA,3/3IqsXfwiǽþ(g۸b#6 dtqt$-mapHNlzI {8#[#Y9Yq{Z σq'dfxZQ1ƘWlR5u"&&t0R3D8DJCBO7ֽ礶Z.T` p("+#琎TN,A 3k G#}Q+D/ݕL$uǕcbS T[mVH2bJL2tԗA?h[EXXc83jcYnrQl̥`eTT6qO蠻:ZFx;dPx D 8+~S@&4D;8=prgiJw)js4֑#DMla´8?k;q ̧ˠ Vm?3AhJkK;V[4ĩy?$>:a" ^ugQ0b3|s/n"ݏ*]|Ϫ u;`HZ po)bRaw"ūјu2Ty~ݧKzޝHqM$Q/{Қ=$24\.@bjx"nH\#9Br8|B7a-jdQ{6P/Swt1nQs!E"kpP QQn)#˱p9OC*  PI4.>D5sm T-md?O7R- ~n[kz*GǶ{Ўc޻7#:& 4ЇZmϊ=m5u=25l5R58am\7ܞ;۞k)\Jt(Вuv]PbGΑ֦o{j9d#\;㻷Zgy'I{A?$g}Wb"kh0u!~f9_V1 mф)fhE2|t!_.Сw@\Btki&/)`FF.<+wV>%gw uھIt{YPJ[yѭ ]tçXOTTa1YbptqAR#wHt.KGґG(d1Q&7@X^e?R854cǧ6S֐סΫ!qaXUe/ ݥfon ?㗓3 = [~'"?:Aߏ_Lφ~ 6`Ft |qu}L[_9lx[b4lvzt6^,GWWe?G>;oy}\ь8aD)[p˱1nT~q.U#% RwiJQn~<f< cE^~^^N."mltonYkV`ϼ{7}.>W OkY:Y%6;YYY.{}|sgM?m!-|u-!6}3? 0V%"8~Ksae΅H=׿,oK{Wi5JϠ!DX8Y}?|mi=g>lyx~7,s?˿ |yN`_-s4y=y?KoV|ZS{l#kA}/- Sy9mxnMg-& 45Ki}LJQiP:"،4nL@JUO X!L>f&Oe\)ʤ(L2Em֣L GSIQ&EeD#yf~¯輘O2;f)L9cG".O7l@YX]Y)qV~0p}Z~^Ck#oz*d{y{d|; f6??+AHHm5=S(6ұ:&P"MUYbVF)>we5d~*#]%ScsÏe@A 76%%¼ ޕ%V?'/mSߞlMɪT?fpo !,rBV,%5;hG!d2Ra@ x+|tT%~b^6A _8s3RC3Jw,?/k1Jw`{bjO6F 7[uNX"ľ4i?O|&Y{AIbepEL _pKgP[ sRl'GySCt=ו em_4 ($A=oLABY'1ӥ&2A6a B9 ~~OvSHƿTiJ"R?;v ¸;E FxғΗ)Q`+%?fu V Dpb k%P g7اGh5G5(D 1]Lr/PJ 9y ,i6`7.O4pR3`|$Qj"H@q;K6hdQA{FK)IM%B@/ "=2M(p/) "%HuBVq9"Ee (N}M`tN  +f7HϑFd yE o|9|%"j^ys)) Sl"ռ W|Tzs9p<,F6 0׽ Rk9!:Gs*:3gKvƕǜ|wξŹ$[FGKu$2C^H2E, 68nxCZьvM eJeogǣ0,Ϝ"c4̂ϤiFjX$gָl:|ԔrA$NPWci+O)yLK 9q {p`9.=?W mhՊ"K4bx+XCAŽ%&RXZAu @I+μզ0yT>H6+:5Ҷ9;,L 3$7M B^3~1|k~g&JVk4n̟KЗ9֨s$%vY`z^o‚lIbUKآ3b ƴ/y3Mbύ6кSrB`eqCјt5&˚[S #n:`Rܠtk ^Q1Bm4{t|/[S #ns07&ݚUnm{>D/js}AWҐ m"(AxL=io2ih`/t*ՅiW^'b"T;HsQJe-Ȳ\rXΫY ?.V\EaJ9DslUI|PIH)O@TwN⌇3Ɣl4ƨ0^3n-A`@ΔJYfdf*[Jnȏs_c2ŀ/ֆc _Y'J~fTpRŰ=~+f>{{D,&4\Y& =EgNSB6P$u!٧2(xtnkj&\EӀZQnu3&'qrC`1ePE=G`<%&%{N{l0unXɬI-Eu .anbdԌAAnɢ. m1έ.ķx?5I6̩O A3JyDK-g׼,[hKWxƌY,9TGSvي7[$cL޾;SLZt"d̀ܪVYyV[y$:& X:*H{KY+8>S ˮsdP^{O΃"sD[O$#j pƎu]MMXN #\G"Nw,AKlL$<> 㰜_$g6fD2Li< hԈ F+)ǃ/ ǂ=bj]@ՖMygyLI|{"%6o<8.m (elrZ8\wZHz+!=mH- Er, )10'%`<(=sXzzRݹnw۝vjΑ<쯷mzaJK Ԁ>b 9%zD%5b.(QTHc.v>" #,:Jl {XڂJi)RaM瑈[IPqGҰLD3ЯT&86,["r}`8I62hDp}8 Llnv, hh epD_%7i*cw8543 >aaX΂nb>8iAxdDfVLS3e6@ozC;²t#"SJnD NvDӍ>Gqzǁ] A;ܞO4Ԗ1K(b FQ>2ʑvTCcEf݌ "q`TQlCƤuQf̋aPTk8m ]["&m;%M R8@|UqB0w+&fb|S 3BFIX4dl!3#$&RBJ2#zac5P+Xݟ,HJv-aSj%p%r"i~襄P~m֤VB-ThzM1\gt6C\Lz<|& >ho$XfYf8uUA8TSSza]߿}?# Y1M ! H*5WLʽUl%H- -I9W잌bɘ^޴]޽&ZhrZ1Ѕ>fJ=k%)̪EPc]tLEt1cXFXV@BYI [b"wT;Jr:H$fDYA RHHp8JbIFS]9S"朩 *oS ) -՝O?SbQ>0cjF$K%n,wFdgDvFdgDV1"&eNKU)DL'eH|RFY4`NtR2"e&e+,խ%*ϳ^ʪ5s3tT$aq*E{?G+c~6 b޴~j2aQgH،}R_4]J\џ楁: rxrDX7CL}hf~8d8;:x9-s ҍ/O.B1XGgWw,Ó@rwKkty;=Ũ/GIIp0F}9(8aD)x_Sz$2ޔ8M_uuη(/ƣOGь  _:Z!yjiϫfy;}-l>|M\VĮ9z]/C5x_td1[^Gڬr8m/Z,h*~98Go=>Q0;ё޴ʕ[tއ2Ph>=NaWE^!:|\v;C??>>M'@_?hy9e&χԛyArhxFo觜'g8[?COE8Y}h4|8ӹ3d9_ 8E~a^ k ϥ֣ &iޗW j/ƠϿ++?St*0_7p`f48}|fbKh. ]i `*= Y7 sy~mqt[9\)Bz%ҥhFeD!{ƛ3$1*_7 rĥ0`YDs橤D*ĈDSD )0d1ou)kB1R-&^Xaٹ/zY*}+|c,4@0%2|nZWNYYy?An!+ ˕߰;&I:!jWd\ 7WQ7-R95O7 ΙP"yo 8D/)R!cI`JT'qH%/)vYy^\מ+4ዧ.V5@K.ARW6{cއQ/m1XG<8YO4V[I-BqgxEYB~'3@Ŧ <5]ta!{F+]Ye|" 69yaMYYXbwj)mm%6*VO b{n {jX^+D|]kI!(B΅׵ݙ {CUB5BP YJƭk!8dAÿ0n0wNd0W_iun<·ƸqVB@xtXdrP֬`:)#Hja|\LgMHNUɝA=TG,^|?K_f:ell(fGdJqߎ!ݠ12FO%5-RTVШThwQu,}q]"WER" %O h;&:(3PRӸ|Wἒ1x6H%ƱqgQL@MgW a/w;ŵ=]EQqCMe$2LɡNj0HX^ј)S;ߝj%eIÛNɯJɯI~;˪Y"$I(PʨcucX7Œ"H]KŽ?=ݖ3sdigB R3iT\WjUcz~l+n|/v,~m̘W|/(ߔ0NN?|: חnuCSGYЇ%Mk(/ `z2N]3Pͬ#Zj( Y y?諈WЕŦ&(-Xwd,$yeVTcEչ~N"u&4g:t.>^\7*J's  L-/xUq} P:\%Lg|Ll:]dAuWUcMG?])`n*xʋq?F!Yݵm&xcR{6֔~v]ųJt){O~DP< S. 
map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.683468819 +0000 UTC m=+0.756074236,LastTimestamp:2026-01-30 00:09:40.683468819 +0000 UTC m=+0.756074236,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.700505 5113 volume_manager.go:295] "The desired_state_of_world populator starts"
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.700541 5113 volume_manager.go:297] "Starting Kubelet Volume Manager"
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.700620 5113 desired_state_of_world_populator.go:150] "Desired state populator starts to run"
Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.700811 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.702315 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="200ms"
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.703513 5113 factory.go:55] Registering systemd factory
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.703610 5113 factory.go:223] Registration of the systemd container factory successfully
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.705358 5113 factory.go:153] Registering CRI-O factory
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.705408 5113 factory.go:223] Registration of the crio container factory successfully
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.705594 5113 factory.go:221] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.705630 5113 factory.go:103] Registering Raw factory
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.705653 5113 manager.go:1196] Started watching for new ooms in manager
Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.705667 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.706503 5113 manager.go:319] Starting recovery of all containers
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.746037 5113 manager.go:324] Recovery completed
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.765600 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.765973 5113 kubelet_network_linux.go:49] "Initialized iptables rules." protocol="IPv4"
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769218 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ee8fbd3-1f81-4666-96da-5afc70819f1a" volumeName="kubernetes.io/projected/6ee8fbd3-1f81-4666-96da-5afc70819f1a-kube-api-access-d4tqq" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769279 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="92dfbade-90b6-4169-8c07-72cff7f2c82b" volumeName="kubernetes.io/projected/92dfbade-90b6-4169-8c07-72cff7f2c82b-kube-api-access-4g8ts" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769293 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-serving-ca" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769306 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f7e2c886-118e-43bb-bef1-c78134de392b" volumeName="kubernetes.io/projected/f7e2c886-118e-43bb-bef1-c78134de392b-kube-api-access-6g4lr" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769320 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="149b3c48-e17c-4a66-a835-d86dabf6ff13" volumeName="kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-catalog-content" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769333 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="17b87002-b798-480a-8e17-83053d698239" volumeName="kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769367 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="94a6e063-3d1a-4d44-875d-185291448c31" volumeName="kubernetes.io/projected/94a6e063-3d1a-4d44-875d-185291448c31-kube-api-access-4hb7m" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769378 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cc85e424-18b2-4924-920b-bd291a8c4b01" volumeName="kubernetes.io/projected/cc85e424-18b2-4924-920b-bd291a8c4b01-kube-api-access-xfp5s" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769384 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769436 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769451 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769395 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-serving-ca" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769571 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-bound-sa-token" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769610 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09cfa50b-4138-4585-a53e-64dd3ab73335" volumeName="kubernetes.io/secret/09cfa50b-4138-4585-a53e-64dd3ab73335-serving-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769625 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31fa8943-81cc-4750-a0b7-0fa9ab5af883" volumeName="kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-utilities" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769638 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="736c54fe-349c-4bb9-870a-d1c1d1c03831" volumeName="kubernetes.io/empty-dir/736c54fe-349c-4bb9-870a-d1c1d1c03831-tmp" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769658 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f71a554-e414-4bc3-96d2-674060397afe" volumeName="kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-kube-api-access-ftwb6" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769681 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" volumeName="kubernetes.io/projected/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-kube-api-access-5lcfw" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769700 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="42a11a02-47e1-488f-b270-2679d3298b0e" volumeName="kubernetes.io/projected/42a11a02-47e1-488f-b270-2679d3298b0e-kube-api-access-qgrkj" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769714 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f0bc7fcb0822a2c13eb2d22cd8c0641" volumeName="kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-tmp-dir" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769726 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" volumeName="kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-trusted-ca-bundle" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769739 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-audit-policies" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769754 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" volumeName="kubernetes.io/projected/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-kube-api-access-ddlk9" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769766 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/empty-dir/9e9b5059-1b3e-4067-a63d-2952cbe863af-ca-trust-extracted" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769782 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a7a88189-c967-4640-879e-27665747f20c" volumeName="kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-apiservice-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769800 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" volumeName="kubernetes.io/empty-dir/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-tmp" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769812 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" volumeName="kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-config" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769825 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" volumeName="kubernetes.io/secret/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-serving-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769836 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" volumeName="kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769851 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" volumeName="kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-catalog-content" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769865 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7afa918d-be67-40a6-803c-d3b0ae99d815" volumeName="kubernetes.io/secret/7afa918d-be67-40a6-803c-d3b0ae99d815-serving-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769881 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="869851b9-7ffb-4af0-b166-1d8aa40a5f80" volumeName="kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-binary-copy" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769892 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="869851b9-7ffb-4af0-b166-1d8aa40a5f80" volumeName="kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-whereabouts-flatfile-configmap" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769904 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="2325ffef-9d5b-447f-b00e-3efc429acefe" volumeName="kubernetes.io/secret/2325ffef-9d5b-447f-b00e-3efc429acefe-serving-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769915 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6077b63e-53a2-4f96-9d56-1ce0324e4913" volumeName="kubernetes.io/projected/6077b63e-53a2-4f96-9d56-1ce0324e4913-kube-api-access-zth6t" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769929 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5ebfebf6-3ecd-458e-943f-bb25b52e2718" volumeName="kubernetes.io/projected/5ebfebf6-3ecd-458e-943f-bb25b52e2718-kube-api-access-l87hs" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769940 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="736c54fe-349c-4bb9-870a-d1c1d1c03831" volumeName="kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-config" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769954 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e1d2a42d-af1d-4054-9618-ab545e0ed8b7" volumeName="kubernetes.io/projected/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-kube-api-access-9z4sw" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769966 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="94a6e063-3d1a-4d44-875d-185291448c31" volumeName="kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-utilities" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769978 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" volumeName="kubernetes.io/empty-dir/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-tmp" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.769990 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cc85e424-18b2-4924-920b-bd291a8c4b01" volumeName="kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-catalog-content" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770002 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31fa8943-81cc-4750-a0b7-0fa9ab5af883" volumeName="kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-catalog-content" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770017 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-error" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770028 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" volumeName="kubernetes.io/empty-dir/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-tmpfs" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770039 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" volumeName="kubernetes.io/secret/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-operator-metrics" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770050 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c491984c-7d4b-44aa-8c1e-d7974424fa47" volumeName="kubernetes.io/secret/c491984c-7d4b-44aa-8c1e-d7974424fa47-machine-api-operator-tls" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770062 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cc85e424-18b2-4924-920b-bd291a8c4b01" volumeName="kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-utilities" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770074 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-router-certs" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770085 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7afa918d-be67-40a6-803c-d3b0ae99d815" volumeName="kubernetes.io/projected/7afa918d-be67-40a6-803c-d3b0ae99d815-kube-api-access" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770098 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a52afe44-fb37-46ed-a1f8-bf39727a3cbe" volumeName="kubernetes.io/secret/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770111 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d565531a-ff86-4608-9d19-767de01ac31b" volumeName="kubernetes.io/secret/d565531a-ff86-4608-9d19-767de01ac31b-proxy-tls" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770120 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="18f80adb-c1c3-49ba-8ee4-932c851d3897" volumeName="kubernetes.io/configmap/18f80adb-c1c3-49ba-8ee4-932c851d3897-service-ca-bundle" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770130 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="593a3561-7760-45c5-8f91-5aaef7475d0f" volumeName="kubernetes.io/projected/593a3561-7760-45c5-8f91-5aaef7475d0f-kube-api-access-sbc2l" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770140 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-session" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770150 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/projected/d19cb085-0c5b-4810-b654-ce7923221d90-kube-api-access-m5lgh" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770161 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-audit-policies" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770174 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" volumeName="kubernetes.io/projected/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-kube-api-access-xxfcv" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770184 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d7e8f42f-dc0e-424b-bb56-5ec849834888" volumeName="kubernetes.io/projected/d7e8f42f-dc0e-424b-bb56-5ec849834888-kube-api-access" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770194 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4" volumeName="kubernetes.io/secret/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-metrics-certs" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770213 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="736c54fe-349c-4bb9-870a-d1c1d1c03831" volumeName="kubernetes.io/projected/736c54fe-349c-4bb9-870a-d1c1d1c03831-kube-api-access-6dmhf" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770224 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4" volumeName="kubernetes.io/projected/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-kube-api-access-pgx6b" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770236 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="584e1f4a-8205-47d7-8efb-3afc6017c4c9" volumeName="kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-utilities" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770246 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c5f2bfad-70f6-4185-a3d9-81ce12720767" volumeName="kubernetes.io/empty-dir/c5f2bfad-70f6-4185-a3d9-81ce12720767-tmp-dir" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770259 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-audit" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770270 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-serving-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770280 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b4750666-1362-4001-abd0-6f89964cc621" volumeName="kubernetes.io/projected/b4750666-1362-4001-abd0-6f89964cc621-kube-api-access-twvbl" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770292 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/configmap/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-trusted-ca" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770308 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="593a3561-7760-45c5-8f91-5aaef7475d0f" volumeName="kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-node-bootstrap-token" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770318 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" volumeName="kubernetes.io/configmap/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-trusted-ca" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770331 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01080b46-74f1-4191-8755-5152a57b3b25" volumeName="kubernetes.io/secret/01080b46-74f1-4191-8755-5152a57b3b25-serving-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770349 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="81e39f7b-62e4-4fc9-992a-6535ce127a02" volumeName="kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-cni-binary-copy" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770360 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="81e39f7b-62e4-4fc9-992a-6535ce127a02" volumeName="kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-multus-daemon-config" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770372 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af33e427-6803-48c2-a76a-dd9deb7cbf9a" volumeName="kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-config" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770415 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="16bdd140-dce1-464c-ab47-dd5798d1d256" volumeName="kubernetes.io/secret/16bdd140-dce1-464c-ab47-dd5798d1d256-serving-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770430 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-serving-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770440 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7599e0b6-bddf-4def-b7f2-0b32206e8651" volumeName="kubernetes.io/configmap/7599e0b6-bddf-4def-b7f2-0b32206e8651-config" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770452 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/projected/a555ff2e-0be6-46d5-897d-863bb92ae2b3-kube-api-access-8pskd" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770464 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af33e427-6803-48c2-a76a-dd9deb7cbf9a" volumeName="kubernetes.io/projected/af33e427-6803-48c2-a76a-dd9deb7cbf9a-kube-api-access-z5rsr" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770474 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="18f80adb-c1c3-49ba-8ee4-932c851d3897" volumeName="kubernetes.io/projected/18f80adb-c1c3-49ba-8ee4-932c851d3897-kube-api-access-wbmqg" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770486 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7df94c10-441d-4386-93a6-6730fb7bcde0" volumeName="kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-env-overrides" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770498 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" volumeName="kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-profile-collector-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770511 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f65c0ac1-8bca-454d-a2e6-e35cb418beac" volumeName="kubernetes.io/empty-dir/f65c0ac1-8bca-454d-a2e6-e35cb418beac-tmp-dir" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770749 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f7e2c886-118e-43bb-bef1-c78134de392b" volumeName="kubernetes.io/empty-dir/f7e2c886-118e-43bb-bef1-c78134de392b-tmp-dir" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770765 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f0bc7fcb0822a2c13eb2d22cd8c0641" volumeName="kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-var-run-kubernetes" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770777 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-encryption-config" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770790 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="149b3c48-e17c-4a66-a835-d86dabf6ff13" volumeName="kubernetes.io/projected/149b3c48-e17c-4a66-a835-d86dabf6ff13-kube-api-access-wj4qr" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770801 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3a14caf222afb62aaabdc47808b6f944" volumeName="kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770812 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="736c54fe-349c-4bb9-870a-d1c1d1c03831" volumeName="kubernetes.io/secret/736c54fe-349c-4bb9-870a-d1c1d1c03831-serving-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770824 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7afa918d-be67-40a6-803c-d3b0ae99d815" volumeName="kubernetes.io/configmap/7afa918d-be67-40a6-803c-d3b0ae99d815-config" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770834 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="869851b9-7ffb-4af0-b166-1d8aa40a5f80" volumeName="kubernetes.io/projected/869851b9-7ffb-4af0-b166-1d8aa40a5f80-kube-api-access-mjwtd" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770845 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f0bc7fcb0822a2c13eb2d22cd8c0641" volumeName="kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-ca-trust-dir" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770855 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b605f283-6f2e-42da-a838-54421690f7d0" volumeName="kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-utilities" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770875 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770886 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a52afe44-fb37-46ed-a1f8-bf39727a3cbe" volumeName="kubernetes.io/projected/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-kube-api-access-rzt4w" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770896 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-trusted-ca-bundle" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770905 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3a14caf222afb62aaabdc47808b6f944" volumeName="kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770915 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-service-ca" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770927 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-service-ca" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770936 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/projected/6edfcf45-925b-4eff-b940-95b6fc0b85d4-kube-api-access-8nb9c" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770949 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7599e0b6-bddf-4def-b7f2-0b32206e8651" volumeName="kubernetes.io/secret/7599e0b6-bddf-4def-b7f2-0b32206e8651-serving-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770959 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7df94c10-441d-4386-93a6-6730fb7bcde0" volumeName="kubernetes.io/projected/7df94c10-441d-4386-93a6-6730fb7bcde0-kube-api-access-nmmzf" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770972 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c5f2bfad-70f6-4185-a3d9-81ce12720767" volumeName="kubernetes.io/secret/c5f2bfad-70f6-4185-a3d9-81ce12720767-serving-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770983 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0dd0fbac-8c0d-4228-8faa-abbeedabf7db" volumeName="kubernetes.io/projected/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-kube-api-access-q4smf" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.770993 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af41de71-79cf-4590-bbe9-9e8b848862cb" volumeName="kubernetes.io/projected/af41de71-79cf-4590-bbe9-9e8b848862cb-kube-api-access-d7cps" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771003 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c491984c-7d4b-44aa-8c1e-d7974424fa47" volumeName="kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-images" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771013 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d565531a-ff86-4608-9d19-767de01ac31b" volumeName="kubernetes.io/projected/d565531a-ff86-4608-9d19-767de01ac31b-kube-api-access-99zj9" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771024 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a7a88189-c967-4640-879e-27665747f20c" volumeName="kubernetes.io/projected/a7a88189-c967-4640-879e-27665747f20c-kube-api-access-8nspp" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771035 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e1d2a42d-af1d-4054-9618-ab545e0ed8b7" volumeName="kubernetes.io/secret/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-proxy-tls" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771046 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="301e1965-1754-483d-b6cc-bfae7038bbca" volumeName="kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-profile-collector-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771059 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6077b63e-53a2-4f96-9d56-1ce0324e4913" volumeName="kubernetes.io/secret/6077b63e-53a2-4f96-9d56-1ce0324e4913-metrics-tls" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771070 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ce090a97-9ab6-4c40-a719-64ff2acd9778" volumeName="kubernetes.io/projected/ce090a97-9ab6-4c40-a719-64ff2acd9778-kube-api-access-xnxbn" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771085 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e1d2a42d-af1d-4054-9618-ab545e0ed8b7" volumeName="kubernetes.io/configmap/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-mcd-auth-proxy-config" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771097 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-oauth-config" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771110 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af33e427-6803-48c2-a76a-dd9deb7cbf9a" volumeName="kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-env-overrides" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771123 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="18f80adb-c1c3-49ba-8ee4-932c851d3897" volumeName="kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-metrics-certs" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771148 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-bound-sa-token" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771159 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af33e427-6803-48c2-a76a-dd9deb7cbf9a" volumeName="kubernetes.io/secret/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovn-node-metrics-cert" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771170 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01080b46-74f1-4191-8755-5152a57b3b25" volumeName="kubernetes.io/projected/01080b46-74f1-4191-8755-5152a57b3b25-kube-api-access-w94wk" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771181 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c491984c-7d4b-44aa-8c1e-d7974424fa47" volumeName="kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-config" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771194 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="149b3c48-e17c-4a66-a835-d86dabf6ff13" volumeName="kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-utilities" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771205 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5ebfebf6-3ecd-458e-943f-bb25b52e2718" volumeName="kubernetes.io/configmap/5ebfebf6-3ecd-458e-943f-bb25b52e2718-serviceca" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771216 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771227 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4"
volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771240 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="81e39f7b-62e4-4fc9-992a-6535ce127a02" volumeName="kubernetes.io/projected/81e39f7b-62e4-4fc9-992a-6535ce127a02-kube-api-access-pllx6" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771242 5113 cpu_manager.go:222] "Starting CPU manager" policy="none" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771261 5113 cpu_manager.go:223] "Reconciling" reconcilePeriod="10s" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771291 5113 state_mem.go:36] "Initialized new in-memory state store" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771252 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771409 5113 kubelet_network_linux.go:49] "Initialized iptables rules." protocol="IPv6" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771459 5113 status_manager.go:230] "Starting to sync pod status with apiserver" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771498 5113 watchdog_linux.go:127] "Systemd watchdog is not enabled or the interval is invalid, so health checking will not be started." Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.771513 5113 kubelet.go:2451] "Starting kubelet main sync loop" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.771594 5113 kubelet.go:2475] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.772879 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777547 5113 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b1264ac67579ad07e7e9003054d44fe40dd55285a4b2f7dc74e48be1aee0868a/globalmount" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777617 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a7a88189-c967-4640-879e-27665747f20c" volumeName="kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-webhook-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777648 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" volumeName="kubernetes.io/projected/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-kube-api-access-qqbfk" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777669 5113 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="fc8db2c7-859d-47b3-a900-2bd0c0b2973b" volumeName="kubernetes.io/projected/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-kube-api-access-hckvg" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777692 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="736c54fe-349c-4bb9-870a-d1c1d1c03831" volumeName="kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-client-ca" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777720 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" volumeName="kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-catalog-content" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777746 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="92dfbade-90b6-4169-8c07-72cff7f2c82b" volumeName="kubernetes.io/secret/92dfbade-90b6-4169-8c07-72cff7f2c82b-metrics-tls" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777775 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-image-import-ca" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777803 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-encryption-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777832 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7599e0b6-bddf-4def-b7f2-0b32206e8651" volumeName="kubernetes.io/projected/7599e0b6-bddf-4def-b7f2-0b32206e8651-kube-api-access-ptkcf" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777860 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777883 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-login" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777904 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c491984c-7d4b-44aa-8c1e-d7974424fa47" volumeName="kubernetes.io/projected/c491984c-7d4b-44aa-8c1e-d7974424fa47-kube-api-access-9vsz9" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777926 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-trusted-ca-bundle" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777944 5113 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="0effdbcf-dd7d-404d-9d48-77536d665a5d" volumeName="kubernetes.io/projected/0effdbcf-dd7d-404d-9d48-77536d665a5d-kube-api-access-mfzkj" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777964 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/secret/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-image-registry-operator-tls" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.777985 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09cfa50b-4138-4585-a53e-64dd3ab73335" volumeName="kubernetes.io/configmap/09cfa50b-4138-4585-a53e-64dd3ab73335-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778005 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-tls" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778026 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778046 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b4750666-1362-4001-abd0-6f89964cc621" volumeName="kubernetes.io/secret/b4750666-1362-4001-abd0-6f89964cc621-proxy-tls" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778065 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c5f2bfad-70f6-4185-a3d9-81ce12720767" volumeName="kubernetes.io/projected/c5f2bfad-70f6-4185-a3d9-81ce12720767-kube-api-access" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778084 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc4541ce-7789-4670-bc75-5c2868e52ce0" volumeName="kubernetes.io/projected/fc4541ce-7789-4670-bc75-5c2868e52ce0-kube-api-access-8nt2j" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778106 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="2325ffef-9d5b-447f-b00e-3efc429acefe" volumeName="kubernetes.io/projected/2325ffef-9d5b-447f-b00e-3efc429acefe-kube-api-access-zg8nc" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778125 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-oauth-serving-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778145 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" volumeName="kubernetes.io/secret/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-serving-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778181 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-client" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778203 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/projected/567683bd-0efc-4f21-b076-e28559628404-kube-api-access-m26jq" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778223 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="16bdd140-dce1-464c-ab47-dd5798d1d256" volumeName="kubernetes.io/empty-dir/16bdd140-dce1-464c-ab47-dd5798d1d256-available-featuregates" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778243 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-client" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778262 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-trusted-ca-bundle" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778283 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" volumeName="kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-utilities" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778302 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d565531a-ff86-4608-9d19-767de01ac31b" volumeName="kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-auth-proxy-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778324 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" volumeName="kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-service-ca-bundle" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778344 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc8db2c7-859d-47b3-a900-2bd0c0b2973b" volumeName="kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778363 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="869851b9-7ffb-4af0-b166-1d8aa40a5f80" volumeName="kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-sysctl-allowlist" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778382 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" volumeName="kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-utilities" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778406 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-trusted-ca" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778427 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f71a554-e414-4bc3-96d2-674060397afe" volumeName="kubernetes.io/secret/9f71a554-e414-4bc3-96d2-674060397afe-metrics-tls" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778447 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b605f283-6f2e-42da-a838-54421690f7d0" volumeName="kubernetes.io/projected/b605f283-6f2e-42da-a838-54421690f7d0-kube-api-access-6rmnv" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778468 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc8db2c7-859d-47b3-a900-2bd0c0b2973b" volumeName="kubernetes.io/secret/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-machine-approver-tls" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778489 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-ca-trust-extracted-pem" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778516 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="34177974-8d82-49d2-a763-391d0df3bbd8" volumeName="kubernetes.io/secret/34177974-8d82-49d2-a763-391d0df3bbd8-metrics-tls" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778557 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ce090a97-9ab6-4c40-a719-64ff2acd9778" volumeName="kubernetes.io/configmap/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-cabundle" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778578 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01080b46-74f1-4191-8755-5152a57b3b25" volumeName="kubernetes.io/configmap/01080b46-74f1-4191-8755-5152a57b3b25-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778598 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-kube-api-access-tkdh6" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778618 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="584e1f4a-8205-47d7-8efb-3afc6017c4c9" volumeName="kubernetes.io/projected/584e1f4a-8205-47d7-8efb-3afc6017c4c9-kube-api-access-tknt7" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778634 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" volumeName="kubernetes.io/configmap/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778653 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f65c0ac1-8bca-454d-a2e6-e35cb418beac" 
volumeName="kubernetes.io/configmap/f65c0ac1-8bca-454d-a2e6-e35cb418beac-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778673 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f65c0ac1-8bca-454d-a2e6-e35cb418beac" volumeName="kubernetes.io/projected/f65c0ac1-8bca-454d-a2e6-e35cb418beac-kube-api-access" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778692 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="301e1965-1754-483d-b6cc-bfae7038bbca" volumeName="kubernetes.io/empty-dir/301e1965-1754-483d-b6cc-bfae7038bbca-tmpfs" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778711 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-certificates" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778730 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f71a554-e414-4bc3-96d2-674060397afe" volumeName="kubernetes.io/configmap/9f71a554-e414-4bc3-96d2-674060397afe-trusted-ca" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778750 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-proxy-ca-bundles" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778769 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778791 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6edfcf45-925b-4eff-b940-95b6fc0b85d4" volumeName="kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778809 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ee8fbd3-1f81-4666-96da-5afc70819f1a" volumeName="kubernetes.io/secret/6ee8fbd3-1f81-4666-96da-5afc70819f1a-samples-operator-tls" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778828 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b605f283-6f2e-42da-a838-54421690f7d0" volumeName="kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-catalog-content" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778846 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f65c0ac1-8bca-454d-a2e6-e35cb418beac" volumeName="kubernetes.io/secret/f65c0ac1-8bca-454d-a2e6-e35cb418beac-serving-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778866 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09cfa50b-4138-4585-a53e-64dd3ab73335" 
volumeName="kubernetes.io/projected/09cfa50b-4138-4585-a53e-64dd3ab73335-kube-api-access-zsb9b" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778886 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" volumeName="kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-tmp" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778906 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="2325ffef-9d5b-447f-b00e-3efc429acefe" volumeName="kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778938 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="428b39f5-eb1c-4f65-b7a4-eeb6e84860cc" volumeName="kubernetes.io/configmap/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-iptables-alerter-script" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778957 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="428b39f5-eb1c-4f65-b7a4-eeb6e84860cc" volumeName="kubernetes.io/projected/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-kube-api-access-dsgwk" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778976 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="42a11a02-47e1-488f-b270-2679d3298b0e" volumeName="kubernetes.io/secret/42a11a02-47e1-488f-b270-2679d3298b0e-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778990 5113 policy_none.go:49] "None policy: Start" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779037 5113 memory_manager.go:186] "Starting memorymanager" policy="None" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779069 5113 state_mem.go:35] "Initializing new in-memory state store" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.778997 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c5f2bfad-70f6-4185-a3d9-81ce12720767" volumeName="kubernetes.io/configmap/c5f2bfad-70f6-4185-a3d9-81ce12720767-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779197 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7df94c10-441d-4386-93a6-6730fb7bcde0" volumeName="kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-ovnkube-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779222 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31fa8943-81cc-4750-a0b7-0fa9ab5af883" volumeName="kubernetes.io/projected/31fa8943-81cc-4750-a0b7-0fa9ab5af883-kube-api-access-grwfz" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779240 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9f71a554-e414-4bc3-96d2-674060397afe" volumeName="kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-bound-sa-token" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779255 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="a208c9c2-333b-4b4a-be0d-bc32ec38a821" volumeName="kubernetes.io/projected/a208c9c2-333b-4b4a-be0d-bc32ec38a821-kube-api-access-26xrl" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779271 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ce090a97-9ab6-4c40-a719-64ff2acd9778" volumeName="kubernetes.io/secret/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-key" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779285 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc8db2c7-859d-47b3-a900-2bd0c0b2973b" volumeName="kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-auth-proxy-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779301 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="301e1965-1754-483d-b6cc-bfae7038bbca" volumeName="kubernetes.io/projected/301e1965-1754-483d-b6cc-bfae7038bbca-kube-api-access-7jjkz" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779315 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-service-ca" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779329 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" volumeName="kubernetes.io/projected/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-kube-api-access-dztfv" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779347 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b638b8f4bb0070e40528db779baf6a2" volumeName="kubernetes.io/empty-dir/0b638b8f4bb0070e40528db779baf6a2-tmp" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779362 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7afa918d-be67-40a6-803c-d3b0ae99d815" volumeName="kubernetes.io/empty-dir/7afa918d-be67-40a6-803c-d3b0ae99d815-tmp" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779375 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a208c9c2-333b-4b4a-be0d-bc32ec38a821" volumeName="kubernetes.io/secret/a208c9c2-333b-4b4a-be0d-bc32ec38a821-package-server-manager-serving-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779390 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d7e8f42f-dc0e-424b-bb56-5ec849834888" volumeName="kubernetes.io/configmap/d7e8f42f-dc0e-424b-bb56-5ec849834888-service-ca" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779404 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20c5c5b4bed930554494851fe3cb2b2a" volumeName="kubernetes.io/empty-dir/20c5c5b4bed930554494851fe3cb2b2a-tmp-dir" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779419 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" 
volumeName="kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779433 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-etcd-client" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779450 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/secret/a555ff2e-0be6-46d5-897d-863bb92ae2b3-serving-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779464 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a7a88189-c967-4640-879e-27665747f20c" volumeName="kubernetes.io/empty-dir/a7a88189-c967-4640-879e-27665747f20c-tmpfs" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779479 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="18f80adb-c1c3-49ba-8ee4-932c851d3897" volumeName="kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-stats-auth" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779493 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" volumeName="kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-serving-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779508 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="301e1965-1754-483d-b6cc-bfae7038bbca" volumeName="kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-srv-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779539 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-client-ca" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779556 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7df94c10-441d-4386-93a6-6730fb7bcde0" volumeName="kubernetes.io/secret/7df94c10-441d-4386-93a6-6730fb7bcde0-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779570 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="92dfbade-90b6-4169-8c07-72cff7f2c82b" volumeName="kubernetes.io/configmap/92dfbade-90b6-4169-8c07-72cff7f2c82b-config-volume" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779584 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" volumeName="kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-kube-api-access-ws8zz" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779602 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9e9b5059-1b3e-4067-a63d-2952cbe863af" 
volumeName="kubernetes.io/secret/9e9b5059-1b3e-4067-a63d-2952cbe863af-installation-pull-secrets" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779618 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d7e8f42f-dc0e-424b-bb56-5ec849834888" volumeName="kubernetes.io/secret/d7e8f42f-dc0e-424b-bb56-5ec849834888-serving-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779632 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0dd0fbac-8c0d-4228-8faa-abbeedabf7db" volumeName="kubernetes.io/secret/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-webhook-certs" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779647 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="34177974-8d82-49d2-a763-391d0df3bbd8" volumeName="kubernetes.io/projected/34177974-8d82-49d2-a763-391d0df3bbd8-kube-api-access-m7xz2" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779661 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="593a3561-7760-45c5-8f91-5aaef7475d0f" volumeName="kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-certs" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779675 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779689 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" volumeName="kubernetes.io/projected/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-kube-api-access-l9stx" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779707 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" volumeName="kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-srv-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779724 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="af33e427-6803-48c2-a76a-dd9deb7cbf9a" volumeName="kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-script-lib" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779739 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="2325ffef-9d5b-447f-b00e-3efc429acefe" volumeName="kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-trusted-ca" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779752 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-ca" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779769 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" 
volumeName="kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-serving-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779783 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="92dfbade-90b6-4169-8c07-72cff7f2c82b" volumeName="kubernetes.io/empty-dir/92dfbade-90b6-4169-8c07-72cff7f2c82b-tmp-dir" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779852 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d19cb085-0c5b-4810-b654-ce7923221d90" volumeName="kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-serving-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779867 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a555ff2e-0be6-46d5-897d-863bb92ae2b3" volumeName="kubernetes.io/empty-dir/a555ff2e-0be6-46d5-897d-863bb92ae2b3-tmp" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779882 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d565531a-ff86-4608-9d19-767de01ac31b" volumeName="kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-images" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779896 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e093be35-bb62-4843-b2e8-094545761610" volumeName="kubernetes.io/projected/e093be35-bb62-4843-b2e8-094545761610-kube-api-access-pddnv" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779913 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="18f80adb-c1c3-49ba-8ee4-932c851d3897" volumeName="kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-default-certificate" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779954 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" volumeName="kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779969 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc4541ce-7789-4670-bc75-5c2868e52ce0" volumeName="kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-ovnkube-identity-cm" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779984 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc4541ce-7789-4670-bc75-5c2868e52ce0" volumeName="kubernetes.io/secret/fc4541ce-7789-4670-bc75-5c2868e52ce0-webhook-cert" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.779998 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="584e1f4a-8205-47d7-8efb-3afc6017c4c9" volumeName="kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-catalog-content" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.780013 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f559dfa3-3917-43a2-97f6-61ddfda10e93" 
volumeName="kubernetes.io/projected/f559dfa3-3917-43a2-97f6-61ddfda10e93-kube-api-access-hm9x7" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.780028 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f863fff9-286a-45fa-b8f0-8a86994b8440" volumeName="kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.780043 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="567683bd-0efc-4f21-b076-e28559628404" volumeName="kubernetes.io/empty-dir/567683bd-0efc-4f21-b076-e28559628404-tmp-dir" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.780058 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" volumeName="kubernetes.io/projected/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-kube-api-access-ks6v2" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.780073 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b4750666-1362-4001-abd0-6f89964cc621" volumeName="kubernetes.io/configmap/b4750666-1362-4001-abd0-6f89964cc621-mcc-auth-proxy-config" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.780086 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="16bdd140-dce1-464c-ab47-dd5798d1d256" volumeName="kubernetes.io/projected/16bdd140-dce1-464c-ab47-dd5798d1d256-kube-api-access-94l9h" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.780109 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="94a6e063-3d1a-4d44-875d-185291448c31" volumeName="kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-catalog-content" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.780122 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fc4541ce-7789-4670-bc75-5c2868e52ce0" volumeName="kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-env-overrides" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.780137 5113 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6077b63e-53a2-4f96-9d56-1ce0324e4913" volumeName="kubernetes.io/empty-dir/6077b63e-53a2-4f96-9d56-1ce0324e4913-tmp-dir" seLinuxMountContext="" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.780149 5113 reconstruct.go:97] "Volume reconstruction finished" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.780159 5113 reconciler.go:26] "Reconciler: start to sync state" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.806193 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.813550 5113 manager.go:341] "Starting Device Plugin manager" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.813790 5113 manager.go:517] "Failed to read data from checkpoint" err="checkpoint is not found" checkpoint="kubelet_internal_checkpoint" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.813807 5113 server.go:85] "Starting device plugin registration server" Jan 30 00:09:40 crc 
kubenswrapper[5113]: I0130 00:09:40.815578 5113 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.815615 5113 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.816004 5113 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.816282 5113 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.816306 5113 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.821918 5113 eviction_manager.go:267] "eviction manager: failed to check if we have separate container filesystem. Ignoring." err="non-existent label \"crio-containers\"" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.821970 5113 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.872370 5113 kubelet.go:2537] "SyncLoop ADD" source="file" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.872706 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.874154 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.874194 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.874209 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.874839 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.875009 5113 util.go:30] "No sandbox for pod can be found. 
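
The eviction manager's "failed to get summary stats ... node \"crc\" not found" is expected this early: the node object does not exist yet (registration fails just below), so there is nothing to attach stats to. Once the kubelet is healthy, the same summary the eviction manager consumes is served on the secure kubelet port; a hedged probe, assuming the default port 10250, that the node name "crc" resolves, and a bearer token with nodes/stats access in $TOKEN:

package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
	"os"
)

// Fetch the kubelet's /stats/summary endpoint, the data source behind the
// eviction manager's checks. InsecureSkipVerify is only acceptable on a
// throwaway lab box like this CRC node.
func main() {
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	req, err := http.NewRequest("GET", "https://crc:10250/stats/summary", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+os.Getenv("TOKEN"))
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	if len(body) > 400 {
		body = body[:400] // print just a prefix of the JSON summary
	}
	fmt.Println(string(body))
}
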
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.875058 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.875611 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.875653 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.875652 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.875684 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.875701 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.875662 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.876955 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.877197 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.877292 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.877497 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.877534 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.877543 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.878109 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.878299 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.878367 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.879510 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.879608 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.879633 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.879687 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.879765 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.879880 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.880066 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.879956 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.880094 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.881722 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.881822 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.881891 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.882559 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.882596 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.882606 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.882736 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.882777 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.882801 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.883244 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.883278 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.883793 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.883842 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.883865 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.904378 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="400ms" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.909568 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.915758 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.916887 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.916950 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.916968 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.917004 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.917632 5113 kubelet_node_status.go:110] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.251:6443: connect: connection refused" node="crc" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.919090 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.946931 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.957123 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:40 crc kubenswrapper[5113]: E0130 00:09:40.962967 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.983643 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.983744 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.983803 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-tmp-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.983833 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.983877 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.983901 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-log-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.983930 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-auto-backup-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-etcd-auto-backup-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.983982 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984054 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-trust-dir\" (UniqueName: \"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-ca-trust-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984090 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0b638b8f4bb0070e40528db779baf6a2-tmp\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984120 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-resource-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984138 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-cert-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984187 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-data-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984215 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-usr-local-bin\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984242 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984268 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984381 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984570 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-run-kubernetes\" (UniqueName: \"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-var-run-kubernetes\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984591 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984664 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-run-kubernetes\" (UniqueName: 
\"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-var-run-kubernetes\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984726 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-static-pod-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984753 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/20c5c5b4bed930554494851fe3cb2b2a-tmp-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984831 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984876 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984783 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ca-trust-dir\" (UniqueName: \"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-ca-trust-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984919 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.984977 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.985038 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/0b638b8f4bb0070e40528db779baf6a2-tmp\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.985239 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/9f0bc7fcb0822a2c13eb2d22cd8c0641-tmp-dir\") pod \"kube-controller-manager-crc\" (UID: 
\"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:40 crc kubenswrapper[5113]: I0130 00:09:40.986432 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/20c5c5b4bed930554494851fe3cb2b2a-tmp-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.086615 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.086681 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-resource-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.086735 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-resource-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.086783 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.086885 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-cert-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.086969 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-data-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087010 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-usr-local-bin\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087023 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-cert-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087046 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087119 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087139 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-data-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087173 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-static-pod-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087186 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-usr-local-bin\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087148 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-static-pod-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087233 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087254 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087278 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087294 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087302 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: 
\"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087342 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087367 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087405 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087385 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/4e08c320b1e9e2405e6e0107bdf7eeb4-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"4e08c320b1e9e2405e6e0107bdf7eeb4\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087465 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087467 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/9f0bc7fcb0822a2c13eb2d22cd8c0641-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"9f0bc7fcb0822a2c13eb2d22cd8c0641\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087518 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087583 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087588 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-log-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087599 5113 
operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/0b638b8f4bb0070e40528db779baf6a2-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"0b638b8f4bb0070e40528db779baf6a2\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087621 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-auto-backup-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-etcd-auto-backup-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087700 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-log-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.087774 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-auto-backup-dir\" (UniqueName: \"kubernetes.io/host-path/20c5c5b4bed930554494851fe3cb2b2a-etcd-auto-backup-dir\") pod \"etcd-crc\" (UID: \"20c5c5b4bed930554494851fe3cb2b2a\") " pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.117809 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.119016 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.119073 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.119085 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.119115 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:09:41 crc kubenswrapper[5113]: E0130 00:09:41.123034 5113 kubelet_node_status.go:110] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.251:6443: connect: connection refused" node="crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.211425 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.220499 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.247339 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.258173 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.265979 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:41 crc kubenswrapper[5113]: W0130 00:09:41.270646 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod20c5c5b4bed930554494851fe3cb2b2a.slice/crio-dee01b18edc58668067b6a6492f43e0dfe418f986b098375241e6889a066423c WatchSource:0}: Error finding container dee01b18edc58668067b6a6492f43e0dfe418f986b098375241e6889a066423c: Status 404 returned error can't find the container with id dee01b18edc58668067b6a6492f43e0dfe418f986b098375241e6889a066423c Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.275702 5113 provider.go:93] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 00:09:41 crc kubenswrapper[5113]: W0130 00:09:41.276029 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b638b8f4bb0070e40528db779baf6a2.slice/crio-e2d386d5f5bfcf2eea78e517368951829c74992c83a96b68c33b608ba3bda829 WatchSource:0}: Error finding container e2d386d5f5bfcf2eea78e517368951829c74992c83a96b68c33b608ba3bda829: Status 404 returned error can't find the container with id e2d386d5f5bfcf2eea78e517368951829c74992c83a96b68c33b608ba3bda829 Jan 30 00:09:41 crc kubenswrapper[5113]: W0130 00:09:41.300230 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f0bc7fcb0822a2c13eb2d22cd8c0641.slice/crio-14bc12510d324275191c18ff8683120cc85a6a32049d546310b7e60c1595fbd3 WatchSource:0}: Error finding container 14bc12510d324275191c18ff8683120cc85a6a32049d546310b7e60c1595fbd3: Status 404 returned error can't find the container with id 14bc12510d324275191c18ff8683120cc85a6a32049d546310b7e60c1595fbd3 Jan 30 00:09:41 crc kubenswrapper[5113]: E0130 00:09:41.305401 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="800ms" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.523375 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.525365 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.525454 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.525473 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.525512 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:09:41 crc kubenswrapper[5113]: E0130 00:09:41.526362 5113 kubelet_node_status.go:110] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.251:6443: connect: connection refused" node="crc" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.652863 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.251:6443: 
connect: connection refused Jan 30 00:09:41 crc kubenswrapper[5113]: E0130 00:09:41.725479 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.784636 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"14bc12510d324275191c18ff8683120cc85a6a32049d546310b7e60c1595fbd3"} Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.785845 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"4e08c320b1e9e2405e6e0107bdf7eeb4","Type":"ContainerStarted","Data":"4151938d41fa28c853e70c2c5c7ded64981a8aae3e7a5b2c179bab55605538e3"} Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.790078 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"dee01b18edc58668067b6a6492f43e0dfe418f986b098375241e6889a066423c"} Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.792933 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"0b638b8f4bb0070e40528db779baf6a2","Type":"ContainerStarted","Data":"e2d386d5f5bfcf2eea78e517368951829c74992c83a96b68c33b608ba3bda829"} Jan 30 00:09:41 crc kubenswrapper[5113]: I0130 00:09:41.795966 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"fc45d60c6687ade3ed71d78136fbe17c18c90b36f7aa406c7ffea153480f59f6"} Jan 30 00:09:41 crc kubenswrapper[5113]: E0130 00:09:41.947382 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" Jan 30 00:09:42 crc kubenswrapper[5113]: E0130 00:09:42.106877 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="1.6s" Jan 30 00:09:42 crc kubenswrapper[5113]: E0130 00:09:42.166035 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" Jan 30 00:09:42 crc kubenswrapper[5113]: E0130 00:09:42.251765 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" 
reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.326698 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.328578 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.328657 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.328677 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.328729 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:09:42 crc kubenswrapper[5113]: E0130 00:09:42.329627 5113 kubelet_node_status.go:110] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.251:6443: connect: connection refused" node="crc" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.651874 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.663857 5113 certificate_manager.go:566] "Rotating certificates" logger="kubernetes.io/kube-apiserver-client-kubelet" Jan 30 00:09:42 crc kubenswrapper[5113]: E0130 00:09:42.665343 5113 certificate_manager.go:596] "Failed while requesting a signed certificate from the control plane" err="cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="kubernetes.io/kube-apiserver-client-kubelet.UnhandledError" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.804873 5113 generic.go:358] "Generic (PLEG): container finished" podID="4e08c320b1e9e2405e6e0107bdf7eeb4" containerID="556c149aaf3abcc7711392b257ff4f10359d4d9e8ee8b8b3383a970de7a28303" exitCode=0 Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.805011 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"4e08c320b1e9e2405e6e0107bdf7eeb4","Type":"ContainerDied","Data":"556c149aaf3abcc7711392b257ff4f10359d4d9e8ee8b8b3383a970de7a28303"} Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.805090 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.806352 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.806395 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.806410 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:42 crc kubenswrapper[5113]: E0130 00:09:42.806660 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 
00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.807924 5113 generic.go:358] "Generic (PLEG): container finished" podID="20c5c5b4bed930554494851fe3cb2b2a" containerID="f6f2151f8ca9d295331aa70c7cb364785d177a2cabe410797748bb8b3f2d294e" exitCode=0 Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.807971 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerDied","Data":"f6f2151f8ca9d295331aa70c7cb364785d177a2cabe410797748bb8b3f2d294e"} Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.808074 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.810790 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.810823 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.810835 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:42 crc kubenswrapper[5113]: E0130 00:09:42.811150 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.814417 5113 generic.go:358] "Generic (PLEG): container finished" podID="0b638b8f4bb0070e40528db779baf6a2" containerID="1674b7d60c4a07220d2988766c24f8b6c7835f7d1736041dc4cdf00f7a96e9e2" exitCode=0 Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.814538 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"0b638b8f4bb0070e40528db779baf6a2","Type":"ContainerDied","Data":"1674b7d60c4a07220d2988766c24f8b6c7835f7d1736041dc4cdf00f7a96e9e2"} Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.814580 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.815596 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.815680 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.815702 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:42 crc kubenswrapper[5113]: E0130 00:09:42.816191 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.816376 5113 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988" exitCode=0 Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.816405 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerDied","Data":"84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988"} Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.816677 5113 kubelet_node_status.go:413] 
"Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.817745 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.817779 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.817793 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:42 crc kubenswrapper[5113]: E0130 00:09:42.818069 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.818819 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"fb5de81be22616fca47976a0d24ab6c6b330a5560704d7c9bd3f30816a6a53c7"} Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.818859 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"6e418d2037fa46413cbb5c58dc73ecf2ecc6f110ebd3bfec9715e53ec0b6c855"} Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.820687 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.823226 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.823272 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:42 crc kubenswrapper[5113]: I0130 00:09:42.823289 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:43 crc kubenswrapper[5113]: E0130 00:09:43.151642 5113 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.251:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188f59ad975b9013 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.683468819 +0000 UTC m=+0.756074236,LastTimestamp:2026-01-30 00:09:40.683468819 +0000 UTC m=+0.756074236,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.651652 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Jan 30 00:09:43 crc kubenswrapper[5113]: E0130 00:09:43.707628 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="3.2s" Jan 30 00:09:43 crc kubenswrapper[5113]: E0130 00:09:43.735885 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.831685 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170"} Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.831747 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16"} Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.831759 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580"} Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.837232 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"d32db858499f6acdd8dcfeec470facb33761e98b6974386bdf9e165a721026b8"} Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.837346 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"c01e6a834297ca754c755531034c6a4ded795a39c18f0f97cc0dc73214b2356a"} Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.837480 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.838298 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.838332 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.838344 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:43 crc kubenswrapper[5113]: E0130 00:09:43.838595 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.840186 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"4e08c320b1e9e2405e6e0107bdf7eeb4","Type":"ContainerStarted","Data":"c9513f6490ba61188cc5ec698f270d0e1c17f4e1a9c4bdda59e9e18665a7dd9e"} Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.840263 5113 kubelet_node_status.go:413] "Setting node annotation to enable 
volume controller attach/detach" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.841788 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.841818 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.841832 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:43 crc kubenswrapper[5113]: E0130 00:09:43.841995 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.842758 5113 generic.go:358] "Generic (PLEG): container finished" podID="20c5c5b4bed930554494851fe3cb2b2a" containerID="3c5720519c1757ed7c8bf0c9f56cb990b9f450f7c9c6bc1fd4961f8851f2cd14" exitCode=0 Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.842839 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerDied","Data":"3c5720519c1757ed7c8bf0c9f56cb990b9f450f7c9c6bc1fd4961f8851f2cd14"} Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.843227 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.844020 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.844054 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.844066 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:43 crc kubenswrapper[5113]: E0130 00:09:43.844327 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.848272 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"0b638b8f4bb0070e40528db779baf6a2","Type":"ContainerStarted","Data":"5aca51a566a8bfc282a87f7f7c29ccaa92469aca9b326106d1e177dcb6a0b159"} Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.848303 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"0b638b8f4bb0070e40528db779baf6a2","Type":"ContainerStarted","Data":"d756a0c45f137a1c35d97de642058fe0719246aa403da963da642b3575e4a7c8"} Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.848317 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"0b638b8f4bb0070e40528db779baf6a2","Type":"ContainerStarted","Data":"1f0821e94ddb2e6bf615e3accd0ec0c094ad7318840fb733498457386fa12672"} Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.848451 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.849698 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 
00:09:43.849773 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.849788 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:43 crc kubenswrapper[5113]: E0130 00:09:43.850110 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.929782 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.931074 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.931118 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.931131 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:43 crc kubenswrapper[5113]: I0130 00:09:43.931163 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc" Jan 30 00:09:43 crc kubenswrapper[5113]: E0130 00:09:43.931589 5113 kubelet_node_status.go:110] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.251:6443: connect: connection refused" node="crc" Jan 30 00:09:44 crc kubenswrapper[5113]: E0130 00:09:44.388909 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.251:6443: connect: connection refused" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.515518 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.652193 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.251:6443: connect: connection refused Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.854405 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"8cced52517a20ca5075db64b94072cb4001674e94a06e701dec72f6a07ab2aa0"} Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.854471 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24"} Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.854654 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.856875 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.856957 5113 
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.856979 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.856973 5113 generic.go:358] "Generic (PLEG): container finished" podID="20c5c5b4bed930554494851fe3cb2b2a" containerID="65e9cdf813a17a4871a7677b2b0d236147c4db43f9b87a38b60c0795d93a5207" exitCode=0
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.857214 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.857351 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.857361 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerDied","Data":"65e9cdf813a17a4871a7677b2b0d236147c4db43f9b87a38b60c0795d93a5207"}
Jan 30 00:09:44 crc kubenswrapper[5113]: E0130 00:09:44.857566 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.857631 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.857570 5113 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.858161 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.858836 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.858867 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.858882 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.858903 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.858945 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.858971 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.858977 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.858973 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.859028 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.859045 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.859054 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:44 crc kubenswrapper[5113]: I0130 00:09:44.859068 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:44 crc kubenswrapper[5113]: E0130 00:09:44.859299 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:44 crc kubenswrapper[5113]: E0130 00:09:44.859540 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:44 crc kubenswrapper[5113]: E0130 00:09:44.860173 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:44 crc kubenswrapper[5113]: E0130 00:09:44.860440 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.864364 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"895ff1a1765310a6aee57eb2705b412bd05dfe3974bd21a972125242e01f91a5"}
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.864447 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"18179ab63cf80a7f758b7fd5824271423f89324275d958eb798af84b0b460a97"}
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.864477 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"c84daac274d8f9e1ac29f34d4d1dbecebb0dea078366aa37a4cbbc588a678232"}
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.864582 5113 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.864604 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.864637 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.865415 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.865465 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.865514 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.865592 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.865480 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:45 crc kubenswrapper[5113]: I0130 00:09:45.865702 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:45 crc kubenswrapper[5113]: E0130 00:09:45.866202 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:45 crc kubenswrapper[5113]: E0130 00:09:45.866582 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.757200 5113 certificate_manager.go:566] "Rotating certificates" logger="kubernetes.io/kube-apiserver-client-kubelet"
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.872133 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"3802970e9c97e6b99feaf7ca3cfd1f6939675398a66b6034ad9d323eae1838ef"}
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.872205 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"20c5c5b4bed930554494851fe3cb2b2a","Type":"ContainerStarted","Data":"ab0ebe5822f8252d91adf6695fa0650c95e18f3a4a36ba72dfb2277b4ab1778c"}
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.872333 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.873251 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.873313 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.873333 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:46 crc kubenswrapper[5113]: E0130 00:09:46.873739 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.902124 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.902353 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.903110 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.903205 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.903228 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:46 crc kubenswrapper[5113]: E0130 00:09:46.903747 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:46 crc kubenswrapper[5113]: I0130 00:09:46.999671 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-etcd/etcd-crc"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.131804 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.133149 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.133206 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.133227 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.133262 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.288104 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.516095 5113 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://localhost:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.516237 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://localhost:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.571633 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.571920 5113 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.571978 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.573365 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.573467 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.573489 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:47 crc kubenswrapper[5113]: E0130 00:09:47.574181 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.641245 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.641549 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.642445 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.642492 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.642505 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:47 crc kubenswrapper[5113]: E0130 00:09:47.642866 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.875199 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.875260 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.876233 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.876277 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.876328 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.876346 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.876284 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:47 crc kubenswrapper[5113]: I0130 00:09:47.876407 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:47 crc kubenswrapper[5113]: E0130 00:09:47.876982 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:47 crc kubenswrapper[5113]: E0130 00:09:47.877492 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:48 crc kubenswrapper[5113]: I0130 00:09:48.877706 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:48 crc kubenswrapper[5113]: I0130 00:09:48.878768 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:48 crc kubenswrapper[5113]: I0130 00:09:48.878817 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:48 crc kubenswrapper[5113]: I0130 00:09:48.878832 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:48 crc kubenswrapper[5113]: E0130 00:09:48.879326 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:49 crc kubenswrapper[5113]: I0130 00:09:49.225628 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:09:49 crc kubenswrapper[5113]: I0130 00:09:49.225878 5113 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 30 00:09:49 crc kubenswrapper[5113]: I0130 00:09:49.225926 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:49 crc kubenswrapper[5113]: I0130 00:09:49.227134 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:49 crc kubenswrapper[5113]: I0130 00:09:49.227207 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:49 crc kubenswrapper[5113]: I0130 00:09:49.227226 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:49 crc kubenswrapper[5113]: E0130 00:09:49.227882 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.566511 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.566943 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.568736 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.568803 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.568825 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:50 crc kubenswrapper[5113]: E0130 00:09:50.569413 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.576564 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.660757 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.661049 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.662058 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.662123 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.662136 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:50 crc kubenswrapper[5113]: E0130 00:09:50.662639 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:50 crc kubenswrapper[5113]: E0130 00:09:50.822207 5113 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.882988 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.884428 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.884488 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:50 crc kubenswrapper[5113]: I0130 00:09:50.884508 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:50 crc kubenswrapper[5113]: E0130 00:09:50.885037 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:54 crc kubenswrapper[5113]: I0130 00:09:54.100400 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Jan 30 00:09:54 crc kubenswrapper[5113]: I0130 00:09:54.100748 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:09:54 crc kubenswrapper[5113]: I0130 00:09:54.102220 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:09:54 crc kubenswrapper[5113]: I0130 00:09:54.102296 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:09:54 crc kubenswrapper[5113]: I0130 00:09:54.102324 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:09:54 crc kubenswrapper[5113]: E0130 00:09:54.103140 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:09:54 crc kubenswrapper[5113]: I0130 00:09:54.772517 5113 trace.go:236] Trace[70238122]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Jan-2026 00:09:44.770) (total time: 10001ms):
Jan 30 00:09:54 crc kubenswrapper[5113]: Trace[70238122]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (00:09:54.772)
Jan 30 00:09:54 crc kubenswrapper[5113]: Trace[70238122]: [10.001550417s] [10.001550417s] END
Jan 30 00:09:54 crc kubenswrapper[5113]: E0130 00:09:54.772604 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass"
Jan 30 00:09:55 crc kubenswrapper[5113]: I0130 00:09:55.071385 5113 trace.go:236] Trace[1240301006]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Jan-2026 00:09:45.067) (total time: 10003ms):
Jan 30 00:09:55 crc kubenswrapper[5113]: Trace[1240301006]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10003ms (00:09:55.071)
Jan 30 00:09:55 crc kubenswrapper[5113]: Trace[1240301006]: [10.003657122s] [10.003657122s] END
Jan 30 00:09:55 crc kubenswrapper[5113]: E0130 00:09:55.071445 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" Jan 30 00:09:55 crc kubenswrapper[5113]: I0130 00:09:55.367328 5113 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 30 00:09:55 crc kubenswrapper[5113]: I0130 00:09:55.367428 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 30 00:09:55 crc kubenswrapper[5113]: I0130 00:09:55.374786 5113 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 30 00:09:55 crc kubenswrapper[5113]: I0130 00:09:55.374859 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 30 00:09:56 crc kubenswrapper[5113]: E0130 00:09:56.910040 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.516305 5113 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://localhost:10357/healthz\": context deadline exceeded" start-of-body= Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.516424 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://localhost:10357/healthz\": context deadline exceeded" Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.581588 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.581960 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.583134 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.583239 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.583263 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:57 crc kubenswrapper[5113]: E0130 00:09:57.583900 
5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.590089 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.900968 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.902158 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.902246 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:09:57 crc kubenswrapper[5113]: I0130 00:09:57.902279 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:09:57 crc kubenswrapper[5113]: E0130 00:09:57.902986 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:09:58 crc kubenswrapper[5113]: E0130 00:09:58.011907 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: runtimeclasses.node.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"runtimeclasses\" in API group \"node.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass" Jan 30 00:09:58 crc kubenswrapper[5113]: E0130 00:09:58.946995 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: nodes \"crc\" is forbidden: User \"system:anonymous\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.377779 5113 trace.go:236] Trace[555648179]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Jan-2026 00:09:49.938) (total time: 10439ms): Jan 30 00:10:00 crc kubenswrapper[5113]: Trace[555648179]: ---"Objects listed" error:csidrivers.storage.k8s.io is forbidden: User "system:anonymous" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope 10439ms (00:10:00.377) Jan 30 00:10:00 crc kubenswrapper[5113]: Trace[555648179]: [10.43925727s] [10.43925727s] END Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.377838 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.378183 5113 trace.go:236] Trace[1565732946]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Jan-2026 00:09:50.122) (total time: 10255ms): Jan 30 00:10:00 crc kubenswrapper[5113]: Trace[1565732946]: ---"Objects listed" error:services is forbidden: User "system:anonymous" cannot list resource "services" in API group "" at the cluster scope 10255ms (00:10:00.378) Jan 30 00:10:00 crc kubenswrapper[5113]: Trace[1565732946]: [10.255987212s] [10.255987212s] END Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.378079 5113 
event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad975b9013 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.683468819 +0000 UTC m=+0.756074236,LastTimestamp:2026-01-30 00:09:40.683468819 +0000 UTC m=+0.756074236,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.378221 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:anonymous\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.378420 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.378849 5113 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.384624 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b16df default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769421023 +0000 UTC m=+0.842026420,LastTimestamp:2026-01-30 00:09:40.769421023 +0000 UTC m=+0.842026420,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.390149 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b7270 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769444464 +0000 UTC m=+0.842049851,LastTimestamp:2026-01-30 00:09:40.769444464 +0000 UTC m=+0.842049851,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 
crc kubenswrapper[5113]: E0130 00:10:00.402205 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7ba948 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769458504 +0000 UTC m=+0.842063901,LastTimestamp:2026-01-30 00:09:40.769458504 +0000 UTC m=+0.842063901,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.411211 5113 reflector.go:430] "Caches populated" logger="kubernetes.io/kube-apiserver-client-kubelet" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.421840 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9f43d4ef default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeAllocatableEnforced,Message:Updated Node Allocatable limit across pods,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.816131311 +0000 UTC m=+0.888736688,LastTimestamp:2026-01-30 00:09:40.816131311 +0000 UTC m=+0.888736688,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.425983 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b16df\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b16df default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769421023 +0000 UTC m=+0.842026420,LastTimestamp:2026-01-30 00:09:40.874177431 +0000 UTC m=+0.946782808,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.429076 5113 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33026->192.168.126.11:17697: read: connection reset by peer" start-of-body= Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.429131 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" 
containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33026->192.168.126.11:17697: read: connection reset by peer" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.429315 5113 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.429343 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.432757 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b7270\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b7270 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769444464 +0000 UTC m=+0.842049851,LastTimestamp:2026-01-30 00:09:40.874200622 +0000 UTC m=+0.946805999,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.440715 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7ba948\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7ba948 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769458504 +0000 UTC m=+0.842063901,LastTimestamp:2026-01-30 00:09:40.874214402 +0000 UTC m=+0.946819779,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.444662 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b16df\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b16df default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769421023 +0000 UTC m=+0.842026420,LastTimestamp:2026-01-30 00:09:40.875639139 +0000 UTC 
m=+0.948244506,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.447967 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b7270\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b7270 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769444464 +0000 UTC m=+0.842049851,LastTimestamp:2026-01-30 00:09:40.875658479 +0000 UTC m=+0.948263856,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.451776 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b16df\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b16df default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769421023 +0000 UTC m=+0.842026420,LastTimestamp:2026-01-30 00:09:40.875671139 +0000 UTC m=+0.948276556,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.457552 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b7270\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b7270 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769444464 +0000 UTC m=+0.842049851,LastTimestamp:2026-01-30 00:09:40.87569317 +0000 UTC m=+0.948298587,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.460839 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7ba948\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7ba948 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: 
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.467601 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7ba948\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7ba948 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769458504 +0000 UTC m=+0.842063901,LastTimestamp:2026-01-30 00:09:40.875752181 +0000 UTC m=+0.948357558,Count:4,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.471593 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b16df\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b16df default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769421023 +0000 UTC m=+0.842026420,LastTimestamp:2026-01-30 00:09:40.877510946 +0000 UTC m=+0.950116313,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.477425 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b7270\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b7270 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769444464 +0000 UTC m=+0.842049851,LastTimestamp:2026-01-30 00:09:40.877539866 +0000 UTC m=+0.950145243,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.482800 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7ba948\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7ba948 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769458504 +0000 UTC m=+0.842063901,LastTimestamp:2026-01-30 00:09:40.877590688 +0000 UTC m=+0.950196065,Count:5,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.486800 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b16df\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b16df default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769421023 +0000 UTC m=+0.842026420,LastTimestamp:2026-01-30 00:09:40.879581888 +0000 UTC m=+0.952187315,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.490676 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b7270\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b7270 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769444464 +0000 UTC m=+0.842049851,LastTimestamp:2026-01-30 00:09:40.879622419 +0000 UTC m=+0.952227836,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.495266 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7ba948\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7ba948 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769458504 +0000 UTC m=+0.842063901,LastTimestamp:2026-01-30 00:09:40.879643839 +0000 UTC m=+0.952249256,Count:6,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.501732 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b16df\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b16df default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769421023 +0000 UTC m=+0.842026420,LastTimestamp:2026-01-30 00:09:40.879715841 +0000 UTC m=+0.952321258,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
\"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b16df default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769421023 +0000 UTC m=+0.842026420,LastTimestamp:2026-01-30 00:09:40.879715841 +0000 UTC m=+0.952321258,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.506009 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b7270\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b7270 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769444464 +0000 UTC m=+0.842049851,LastTimestamp:2026-01-30 00:09:40.879936147 +0000 UTC m=+0.952541554,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.510744 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b16df\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b16df default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientMemory,Message:Node crc status is now: NodeHasSufficientMemory,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769421023 +0000 UTC m=+0.842026420,LastTimestamp:2026-01-30 00:09:40.880031949 +0000 UTC m=+0.952637386,Count:8,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.514623 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"crc.188f59ad9c7b7270\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7b7270 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasNoDiskPressure,Message:Node crc status is now: NodeHasNoDiskPressure,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769444464 +0000 UTC m=+0.842049851,LastTimestamp:2026-01-30 00:09:40.880081501 +0000 UTC m=+0.952686918,Count:8,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.522728 5113 event.go:359] "Server rejected event (will not retry!)" err="events 
\"crc.188f59ad9c7ba948\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"default\"" event="&Event{ObjectMeta:{crc.188f59ad9c7ba948 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:NodeHasSufficientPID,Message:Node crc status is now: NodeHasSufficientPID,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:40.769458504 +0000 UTC m=+0.842063901,LastTimestamp:2026-01-30 00:09:40.880106861 +0000 UTC m=+0.952712288,Count:7,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.529036 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59adbab15b11 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:41.276293905 +0000 UTC m=+1.348899312,LastTimestamp:2026-01-30 00:09:41.276293905 +0000 UTC m=+1.348899312,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.533864 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59adbb656d11 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:41.288094993 +0000 UTC m=+1.360700400,LastTimestamp:2026-01-30 00:09:41.288094993 +0000 UTC m=+1.360700400,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.537787 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59adbc979d15 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.542923 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59adbc98137c openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:41.308191612 +0000 UTC m=+1.380796989,LastTimestamp:2026-01-30 00:09:41.308191612 +0000 UTC m=+1.380796989,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.548670 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59adbc97a1c5 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:41.308162501 +0000 UTC m=+1.380767908,LastTimestamp:2026-01-30 00:09:41.308162501 +0000 UTC m=+1.380767908,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.554122 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59ade6d919da openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Created,Message:Created container: wait-for-host-port,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.017096154 +0000 UTC m=+2.089701531,LastTimestamp:2026-01-30 00:09:42.017096154 +0000 UTC m=+2.089701531,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.560699 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59ade6da24f8 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Created,Message:Created container: kube-controller-manager,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.017164536 +0000 UTC m=+2.089769923,LastTimestamp:2026-01-30 00:09:42.017164536 +0000 UTC m=+2.089769923,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.565034 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ade6da55cc openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container: setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.017177036 +0000 UTC m=+2.089782413,LastTimestamp:2026-01-30 00:09:42.017177036 +0000 UTC m=+2.089782413,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.570047 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59ade6ddc3c0 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container: setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.017401792 +0000 UTC m=+2.090007179,LastTimestamp:2026-01-30 00:09:42.017401792 +0000 UTC m=+2.090007179,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.574252 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59ade72e5960 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Created,Message:Created container: setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.022682976 +0000 UTC m=+2.095288353,LastTimestamp:2026-01-30 00:09:42.022682976 +0000 UTC m=+2.095288353,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.578562 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ade7d8adea openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.033845738 +0000 UTC m=+2.106451125,LastTimestamp:2026-01-30 00:09:42.033845738 +0000 UTC m=+2.106451125,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.583540 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59ade7ee23eb openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:Started,Message:Started container kube-controller-manager,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.035252203 +0000 UTC m=+2.107857580,LastTimestamp:2026-01-30 00:09:42.035252203 +0000 UTC m=+2.107857580,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.587879 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\""
event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59ade7ee2716 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{wait-for-host-port},},Reason:Started,Message:Started container wait-for-host-port,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.035253014 +0000 UTC m=+2.107858391,LastTimestamp:2026-01-30 00:09:42.035253014 +0000 UTC m=+2.107858391,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.592285 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59ade7fd956c openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.0362643 +0000 UTC m=+2.108869677,LastTimestamp:2026-01-30 00:09:42.0362643 +0000 UTC m=+2.108869677,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.597706 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59ade813d5d1 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.037722577 +0000 UTC m=+2.110327954,LastTimestamp:2026-01-30 00:09:42.037722577 +0000 UTC m=+2.110327954,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.603322 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59ade81a1666 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{setup},},Reason:Started,Message:Started container setup,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.038132326 +0000 UTC m=+2.110737703,LastTimestamp:2026-01-30 00:09:42.038132326 +0000 UTC m=+2.110737703,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.608801 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59adfd470703 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Created,Message:Created container: cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.393399043 +0000 UTC m=+2.466004420,LastTimestamp:2026-01-30 00:09:42.393399043 +0000 UTC m=+2.466004420,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.613724 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59adfe13c2ee openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Started,Message:Started container cluster-policy-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.406816494 +0000 UTC m=+2.479421911,LastTimestamp:2026-01-30 00:09:42.406816494 +0000 UTC m=+2.479421911,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.617600 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59adfe2635b4 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Pulled,Message:Container image 
\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.408025524 +0000 UTC m=+2.480630951,LastTimestamp:2026-01-30 00:09:42.408025524 +0000 UTC m=+2.480630951,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.622336 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59ae16004ff5 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.808195061 +0000 UTC m=+2.880800438,LastTimestamp:2026-01-30 00:09:42.808195061 +0000 UTC m=+2.880800438,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.627473 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59ae16467c11 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.812793873 +0000 UTC m=+2.885399260,LastTimestamp:2026-01-30 00:09:42.812793873 +0000 UTC m=+2.885399260,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.631759 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59ae16b9c0bf openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Pulled,Message:Container image 
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.820348095 +0000 UTC m=+2.892953512,LastTimestamp:2026-01-30 00:09:42.820348095 +0000 UTC m=+2.892953512,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.636146 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae16ba419e openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:42.820381086 +0000 UTC m=+2.892986503,LastTimestamp:2026-01-30 00:09:42.820381086 +0000 UTC m=+2.892986503,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.642898 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59ae2318c19c openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Created,Message:Created container: kube-controller-manager-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.027900828 +0000 UTC m=+3.100506205,LastTimestamp:2026-01-30 00:09:43.027900828 +0000 UTC m=+3.100506205,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.647705 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59ae240feb8c openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-cert-syncer},},Reason:Started,Message:Started container 
kube-controller-manager-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.044098956 +0000 UTC m=+3.116704333,LastTimestamp:2026-01-30 00:09:43.044098956 +0000 UTC m=+3.116704333,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.652075 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59ae2420b769 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.045199721 +0000 UTC m=+3.117805098,LastTimestamp:2026-01-30 00:09:43.045199721 +0000 UTC m=+3.117805098,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.658652 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.658717 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59ae2748aca1 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Created,Message:Created container: etcd-ensure-env-vars,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.098150049 +0000 UTC m=+3.170755426,LastTimestamp:2026-01-30 00:09:43.098150049 +0000 UTC m=+3.170755426,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.663221 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59ae274aa4a1 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Created,Message:Created container: kube-scheduler,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.098279073 +0000 UTC m=+3.170884450,LastTimestamp:2026-01-30 00:09:43.098279073 +0000 UTC m=+3.170884450,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.668046 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59ae27566f8c openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Created,Message:Created container: kube-rbac-proxy-crio,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.099051916 +0000 UTC m=+3.171657293,LastTimestamp:2026-01-30 00:09:43.099051916 +0000 UTC m=+3.171657293,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.676552 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae275753b8 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Created,Message:Created container: kube-apiserver,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.099110328 +0000 UTC m=+3.171715705,LastTimestamp:2026-01-30 00:09:43.099110328 +0000 UTC m=+3.171715705,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.682744 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae292038d7 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Started,Message:Started container kube-apiserver,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.129053399 +0000 UTC m=+3.201658776,LastTimestamp:2026-01-30 00:09:43.129053399 +0000 UTC 
m=+3.201658776,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.688336 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59ae2948f869 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler},},Reason:Started,Message:Started container kube-scheduler,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.131723881 +0000 UTC m=+3.204329248,LastTimestamp:2026-01-30 00:09:43.131723881 +0000 UTC m=+3.204329248,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.694621 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae294bd9b3 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.131912627 +0000 UTC m=+3.204518014,LastTimestamp:2026-01-30 00:09:43.131912627 +0000 UTC m=+3.204518014,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.696174 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-machine-config-operator\"" event="&Event{ObjectMeta:{kube-rbac-proxy-crio-crc.188f59ae2953bde1 openshift-machine-config-operator 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-machine-config-operator,Name:kube-rbac-proxy-crio-crc,UID:4e08c320b1e9e2405e6e0107bdf7eeb4,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-rbac-proxy-crio},},Reason:Started,Message:Started container kube-rbac-proxy-crio,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.132429793 +0000 UTC m=+3.205035190,LastTimestamp:2026-01-30 00:09:43.132429793 +0000 UTC m=+3.205035190,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.702971 5113 event.go:359] "Server rejected event (will not retry!)" 
err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59ae295fe4c1 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.133226177 +0000 UTC m=+3.205831564,LastTimestamp:2026-01-30 00:09:43.133226177 +0000 UTC m=+3.205831564,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.709118 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59ae307f9455 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Created,Message:Created container: kube-controller-manager-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.252743253 +0000 UTC m=+3.325348630,LastTimestamp:2026-01-30 00:09:43.252743253 +0000 UTC m=+3.325348630,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.715173 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59ae316a4bf6 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager-recovery-controller},},Reason:Started,Message:Started container kube-controller-manager-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.268125686 +0000 UTC m=+3.340731063,LastTimestamp:2026-01-30 00:09:43.268125686 +0000 UTC m=+3.340731063,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.720802 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace 
\"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59ae35fe5600 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Created,Message:Created container: kube-scheduler-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.344936448 +0000 UTC m=+3.417541825,LastTimestamp:2026-01-30 00:09:43.344936448 +0000 UTC m=+3.417541825,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.733057 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59ae379cc677 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-cert-syncer},},Reason:Started,Message:Started container kube-scheduler-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.372097143 +0000 UTC m=+3.444702540,LastTimestamp:2026-01-30 00:09:43.372097143 +0000 UTC m=+3.444702540,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.738909 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59ae37b8799f openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.373912479 +0000 UTC m=+3.446517856,LastTimestamp:2026-01-30 00:09:43.373912479 +0000 UTC m=+3.446517856,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.744319 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae37ccf40d openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Created,Message:Created container: kube-apiserver-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.375254541 +0000 UTC m=+3.447859928,LastTimestamp:2026-01-30 00:09:43.375254541 +0000 UTC m=+3.447859928,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.752482 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae394a2d85 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-syncer},},Reason:Started,Message:Started container kube-apiserver-cert-syncer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.400238469 +0000 UTC m=+3.472843866,LastTimestamp:2026-01-30 00:09:43.400238469 +0000 UTC m=+3.472843866,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.758632 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae396744b3 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.402144947 +0000 UTC m=+3.474750324,LastTimestamp:2026-01-30 00:09:43.402144947 +0000 UTC m=+3.474750324,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.765373 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59ae49d5c0f7 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Created,Message:Created 
container: kube-scheduler-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.677821175 +0000 UTC m=+3.750426552,LastTimestamp:2026-01-30 00:09:43.677821175 +0000 UTC m=+3.750426552,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.770201 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae4a3d6f44 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Created,Message:Created container: kube-apiserver-cert-regeneration-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.684616004 +0000 UTC m=+3.757221381,LastTimestamp:2026-01-30 00:09:43.684616004 +0000 UTC m=+3.757221381,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.775685 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59ae4a477106 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-ensure-env-vars},},Reason:Started,Message:Started container etcd-ensure-env-vars,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.685271814 +0000 UTC m=+3.757877191,LastTimestamp:2026-01-30 00:09:43.685271814 +0000 UTC m=+3.757877191,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.780437 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-scheduler\"" event="&Event{ObjectMeta:{openshift-kube-scheduler-crc.188f59ae4b2da699 openshift-kube-scheduler 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-scheduler,Name:openshift-kube-scheduler-crc,UID:0b638b8f4bb0070e40528db779baf6a2,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-scheduler-recovery-controller},},Reason:Started,Message:Started container kube-scheduler-recovery-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.700358809 +0000 UTC m=+3.772964186,LastTimestamp:2026-01-30 00:09:43.700358809 +0000 UTC m=+3.772964186,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 
00:10:00.785408 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae4b762a5f openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-cert-regeneration-controller},},Reason:Started,Message:Started container kube-apiserver-cert-regeneration-controller,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.705111135 +0000 UTC m=+3.777716502,LastTimestamp:2026-01-30 00:09:43.705111135 +0000 UTC m=+3.777716502,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.789926 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae4b8ab637 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.706457655 +0000 UTC m=+3.779063032,LastTimestamp:2026-01-30 00:09:43.706457655 +0000 UTC m=+3.779063032,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.794484 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59ae5407f69a openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.848883866 +0000 UTC m=+3.921489243,LastTimestamp:2026-01-30 00:09:43.848883866 +0000 UTC m=+3.921489243,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.799932 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace 
\"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae5ca3713b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Created,Message:Created container: kube-apiserver-insecure-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:43.993291067 +0000 UTC m=+4.065896444,LastTimestamp:2026-01-30 00:09:43.993291067 +0000 UTC m=+4.065896444,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.805692 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae5df8a490 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-insecure-readyz},},Reason:Started,Message:Started container kube-apiserver-insecure-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.015651984 +0000 UTC m=+4.088257361,LastTimestamp:2026-01-30 00:09:44.015651984 +0000 UTC m=+4.088257361,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.809994 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae5e1612bd openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.017580733 +0000 UTC m=+4.090186110,LastTimestamp:2026-01-30 00:09:44.017580733 +0000 UTC m=+4.090186110,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.814793 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59ae62648d52 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
[]},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Created,Message:Created container: etcd-resources-copy,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.089832786 +0000 UTC m=+4.162438163,LastTimestamp:2026-01-30 00:09:44.089832786 +0000 UTC m=+4.162438163,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.822374 5113 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.823356 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59ae63e46bec openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.initContainers{etcd-resources-copy},},Reason:Started,Message:Started container etcd-resources-copy,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.11499006 +0000 UTC m=+4.187595437,LastTimestamp:2026-01-30 00:09:44.11499006 +0000 UTC m=+4.187595437,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.830453 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae6c1b6aa2 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Created,Message:Created container: kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.252811938 +0000 UTC m=+4.325417315,LastTimestamp:2026-01-30 00:09:44.252811938 +0000 UTC m=+4.325417315,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.837187 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae6cce9e25 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Started,Message:Started container 
kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.264556069 +0000 UTC m=+4.337161446,LastTimestamp:2026-01-30 00:09:44.264556069 +0000 UTC m=+4.337161446,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.843355 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59ae906ed88d openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.862259341 +0000 UTC m=+4.934864728,LastTimestamp:2026-01-30 00:09:44.862259341 +0000 UTC m=+4.934864728,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.848771 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aea120b7bc openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Created,Message:Created container: etcdctl,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.142351804 +0000 UTC m=+5.214957181,LastTimestamp:2026-01-30 00:09:45.142351804 +0000 UTC m=+5.214957181,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.854251 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aea1ec887f openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcdctl},},Reason:Started,Message:Started container etcdctl,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.155709055 +0000 UTC m=+5.228314432,LastTimestamp:2026-01-30 00:09:45.155709055 +0000 UTC m=+5.228314432,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.858458 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource 
\"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aea1ffee09 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.156980233 +0000 UTC m=+5.229585630,LastTimestamp:2026-01-30 00:09:45.156980233 +0000 UTC m=+5.229585630,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.862963 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aeb0074257 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Created,Message:Created container: etcd,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.392341591 +0000 UTC m=+5.464946978,LastTimestamp:2026-01-30 00:09:45.392341591 +0000 UTC m=+5.464946978,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.867922 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aeb0efd553 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd},},Reason:Started,Message:Started container etcd,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.407583571 +0000 UTC m=+5.480188988,LastTimestamp:2026-01-30 00:09:45.407583571 +0000 UTC m=+5.480188988,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.871663 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aeb1061fd3 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\" already present on 
machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.409044435 +0000 UTC m=+5.481649812,LastTimestamp:2026-01-30 00:09:45.409044435 +0000 UTC m=+5.481649812,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.875450 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aebe9f8d98 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Created,Message:Created container: etcd-metrics,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.637203352 +0000 UTC m=+5.709808729,LastTimestamp:2026-01-30 00:09:45.637203352 +0000 UTC m=+5.709808729,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.879571 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aebfdbba99 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-metrics},},Reason:Started,Message:Started container etcd-metrics,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.657924249 +0000 UTC m=+5.730529626,LastTimestamp:2026-01-30 00:09:45.657924249 +0000 UTC m=+5.730529626,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.883143 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aebff05ad7 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Pulled,Message:Container image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.659275991 +0000 UTC m=+5.731881378,LastTimestamp:2026-01-30 00:09:45.659275991 +0000 UTC m=+5.731881378,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
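Every one of the rejected-event entries above has the same root cause: the kubelet is still authenticating to the API server as system:anonymous while the node bootstraps, RBAC denies that identity any writes, and event.go drops each event without retrying. A minimal client-go sketch (not taken from this log; the kubeconfig path is an assumption) that asks the API server the same authorization question via a SelfSubjectAccessReview:

    package main

    import (
        "context"
        "fmt"

        authv1 "k8s.io/api/authorization/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Assumed path: the kubelet's kubeconfig on an OpenShift/CRC node.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig")
        if err != nil {
            panic(err)
        }
        cs := kubernetes.NewForConfigOrDie(cfg)

        // The exact question RBAC answered with 403 above: may the current
        // identity create Events in the openshift-etcd namespace?
        sar := &authv1.SelfSubjectAccessReview{
            Spec: authv1.SelfSubjectAccessReviewSpec{
                ResourceAttributes: &authv1.ResourceAttributes{
                    Namespace: "openshift-etcd",
                    Verb:      "create",
                    Resource:  "events",
                },
            },
        }
        res, err := cs.AuthorizationV1().SelfSubjectAccessReviews().
            Create(context.Background(), sar, metav1.CreateOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Printf("allowed=%v denied=%v reason=%q\n",
            res.Status.Allowed, res.Status.Denied, res.Status.Reason)
    }

Run against the bootstrap-era credentials this should print allowed=false with an RBAC reason; once the node's client certificate is accepted, the same call flips to allowed=true and the rejected-event noise stops.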
pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.887729 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.888539 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.888576 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.888587 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.888909 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.889235 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aecd125e5e openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Created,Message:Created container: etcd-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.879608926 +0000 UTC m=+5.952214303,LastTimestamp:2026-01-30 00:09:45.879608926 +0000 UTC m=+5.952214303,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.893322 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aecde74c0d openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-readyz},},Reason:Started,Message:Started container etcd-readyz,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.893563405 +0000 UTC m=+5.966168822,LastTimestamp:2026-01-30 00:09:45.893563405 +0000 UTC m=+5.966168822,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.898224 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aece090e9f openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-rev},},Reason:Pulled,Message:Container image 
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:45.895775903 +0000 UTC m=+5.968381280,LastTimestamp:2026-01-30 00:09:45.895775903 +0000 UTC m=+5.968381280,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.904681 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aedae7f7f8 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-rev},},Reason:Created,Message:Created container: etcd-rev,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:46.111711224 +0000 UTC m=+6.184316601,LastTimestamp:2026-01-30 00:09:46.111711224 +0000 UTC m=+6.184316601,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.905758 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-etcd\"" event="&Event{ObjectMeta:{etcd-crc.188f59aedbfee249 openshift-etcd 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-etcd,Name:etcd-crc,UID:20c5c5b4bed930554494851fe3cb2b2a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{etcd-rev},},Reason:Started,Message:Started container etcd-rev,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:46.129990217 +0000 UTC m=+6.202595594,LastTimestamp:2026-01-30 00:09:46.129990217 +0000 UTC m=+6.202595594,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.910318 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/0.log" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.912094 5113 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="8cced52517a20ca5075db64b94072cb4001674e94a06e701dec72f6a07ab2aa0" exitCode=255 Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.912165 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerDied","Data":"8cced52517a20ca5075db64b94072cb4001674e94a06e701dec72f6a07ab2aa0"} Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.912405 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.912969 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:00 crc 
kubenswrapper[5113]: I0130 00:10:00.913003 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.913014 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.913112 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event=<
Jan 30 00:10:00 crc kubenswrapper[5113]: &Event{ObjectMeta:{kube-controller-manager-crc.188f59af2e9e9dea openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:ProbeError,Message:Startup probe error: Get "https://localhost:10357/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Jan 30 00:10:00 crc kubenswrapper[5113]: body:
Jan 30 00:10:00 crc kubenswrapper[5113]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:47.516190186 +0000 UTC m=+7.588795603,LastTimestamp:2026-01-30 00:09:47.516190186 +0000 UTC m=+7.588795603,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}
Jan 30 00:10:00 crc kubenswrapper[5113]: >
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.913364 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:00 crc kubenswrapper[5113]: I0130 00:10:00.913676 5113 scope.go:117] "RemoveContainer" containerID="8cced52517a20ca5075db64b94072cb4001674e94a06e701dec72f6a07ab2aa0"
Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.920831 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59af2ea0b3b9 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Unhealthy,Message:Startup probe failed: Get \"https://localhost:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:47.516326841 +0000 UTC m=+7.588932248,LastTimestamp:2026-01-30 00:09:47.516326841 +0000 UTC m=+7.588932248,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
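The ProbeError/Unhealthy pair above is the startup probe for cluster-policy-controller timing out against https://localhost:10357/healthz before the process starts answering. A rough stand-in for what the kubelet's HTTP prober does (the endpoint, the one-second deadline, and InsecureSkipVerify are assumptions for illustration, not the kubelet's actual prober configuration):

    package main

    import (
        "crypto/tls"
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        // A tight client timeout comparable to a probe window; the kubelet
        // reports "Client.Timeout exceeded" when this deadline passes before
        // any response headers arrive.
        client := &http.Client{
            Timeout: 1 * time.Second,
            Transport: &http.Transport{
                TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
            },
        }
        resp, err := client.Get("https://localhost:10357/healthz")
        if err != nil {
            fmt.Println("probe error:", err) // timeout or connection refused
            return
        }
        defer resp.Body.Close()
        fmt.Println("probe status:", resp.StatusCode) // 200 once healthy
    }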
\"openshift-kube-apiserver\"" event=< Jan 30 00:10:00 crc kubenswrapper[5113]: &Event{ObjectMeta:{kube-apiserver-crc.188f59b102967cbd openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:ProbeError,Message:Startup probe error: HTTP probe failed with statuscode: 403 Jan 30 00:10:00 crc kubenswrapper[5113]: body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 30 00:10:00 crc kubenswrapper[5113]: Jan 30 00:10:00 crc kubenswrapper[5113]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:55.367394493 +0000 UTC m=+15.439999920,LastTimestamp:2026-01-30 00:09:55.367394493 +0000 UTC m=+15.439999920,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 30 00:10:00 crc kubenswrapper[5113]: > Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.941428 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b102978ba5 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Unhealthy,Message:Startup probe failed: HTTP probe failed with statuscode: 403,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:55.367463845 +0000 UTC m=+15.440069272,LastTimestamp:2026-01-30 00:09:55.367463845 +0000 UTC m=+15.440069272,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.948532 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b102967cbd\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Jan 30 00:10:00 crc kubenswrapper[5113]: &Event{ObjectMeta:{kube-apiserver-crc.188f59b102967cbd openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:ProbeError,Message:Startup probe error: HTTP probe failed with statuscode: 403 Jan 30 00:10:00 crc kubenswrapper[5113]: body: {"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 30 00:10:00 crc kubenswrapper[5113]: Jan 30 00:10:00 crc kubenswrapper[5113]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:55.367394493 +0000 UTC m=+15.439999920,LastTimestamp:2026-01-30 00:09:55.374834043 +0000 UTC 
m=+15.447439420,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 30 00:10:00 crc kubenswrapper[5113]: > Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.957044 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b102978ba5\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b102978ba5 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver},},Reason:Unhealthy,Message:Startup probe failed: HTTP probe failed with statuscode: 403,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:55.367463845 +0000 UTC m=+15.440069272,LastTimestamp:2026-01-30 00:09:55.374878484 +0000 UTC m=+15.447483851,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.963404 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event=< Jan 30 00:10:00 crc kubenswrapper[5113]: &Event{ObjectMeta:{kube-controller-manager-crc.188f59b182ad8698 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:ProbeError,Message:Startup probe error: Get "https://localhost:10357/healthz": context deadline exceeded Jan 30 00:10:00 crc kubenswrapper[5113]: body: Jan 30 00:10:00 crc kubenswrapper[5113]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:57.516387992 +0000 UTC m=+17.588993409,LastTimestamp:2026-01-30 00:09:57.516387992 +0000 UTC m=+17.588993409,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 30 00:10:00 crc kubenswrapper[5113]: > Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.967410 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-controller-manager\"" event="&Event{ObjectMeta:{kube-controller-manager-crc.188f59b182b0526f openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:9f0bc7fcb0822a2c13eb2d22cd8c0641,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{cluster-policy-controller},},Reason:Unhealthy,Message:Startup probe failed: Get \"https://localhost:10357/healthz\": context deadline exceeded,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:57.516571247 +0000 UTC m=+17.589176654,LastTimestamp:2026-01-30 00:09:57.516571247 
+0000 UTC m=+17.589176654,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.972479 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Jan 30 00:10:00 crc kubenswrapper[5113]: &Event{ObjectMeta:{kube-apiserver-crc.188f59b2304a1efa openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:17697/healthz": read tcp 192.168.126.11:33026->192.168.126.11:17697: read: connection reset by peer Jan 30 00:10:00 crc kubenswrapper[5113]: body: Jan 30 00:10:00 crc kubenswrapper[5113]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.429108986 +0000 UTC m=+20.501714363,LastTimestamp:2026-01-30 00:10:00.429108986 +0000 UTC m=+20.501714363,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 30 00:10:00 crc kubenswrapper[5113]: > Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.979130 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2304abf91 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Unhealthy,Message:Readiness probe failed: Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:33026->192.168.126.11:17697: read: connection reset by peer,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.429150097 +0000 UTC m=+20.501755474,LastTimestamp:2026-01-30 00:10:00.429150097 +0000 UTC m=+20.501755474,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.984627 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event=< Jan 30 00:10:00 crc kubenswrapper[5113]: &Event{ObjectMeta:{kube-apiserver-crc.188f59b2304d9127 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:ProbeError,Message:Readiness probe error: Get "https://192.168.126.11:17697/healthz": dial tcp 192.168.126.11:17697: connect: connection refused Jan 30 00:10:00 crc kubenswrapper[5113]: body: Jan 30 00:10:00 
crc kubenswrapper[5113]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.429334823 +0000 UTC m=+20.501940200,LastTimestamp:2026-01-30 00:10:00.429334823 +0000 UTC m=+20.501940200,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 30 00:10:00 crc kubenswrapper[5113]: > Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.989483 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2304de736 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Unhealthy,Message:Readiness probe failed: Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:00.429356854 +0000 UTC m=+20.501962231,LastTimestamp:2026-01-30 00:10:00.429356854 +0000 UTC m=+20.501962231,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:00 crc kubenswrapper[5113]: E0130 00:10:00.993887 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59ae5e1612bd\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae5e1612bd openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.017580733 +0000 UTC m=+4.090186110,LastTimestamp:2026-01-30 00:10:00.914508084 +0000 UTC m=+20.987113451,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:01 crc kubenswrapper[5113]: E0130 00:10:01.139229 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59ae6c1b6aa2\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae6c1b6aa2 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Created,Message:Created container: 
kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.252811938 +0000 UTC m=+4.325417315,LastTimestamp:2026-01-30 00:10:01.134121417 +0000 UTC m=+21.206726804,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:01 crc kubenswrapper[5113]: E0130 00:10:01.152302 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59ae6cce9e25\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae6cce9e25 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Started,Message:Started container kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.264556069 +0000 UTC m=+4.337161446,LastTimestamp:2026-01-30 00:10:01.146917761 +0000 UTC m=+21.219523148,Count:2,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:01 crc kubenswrapper[5113]: I0130 00:10:01.512205 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:10:01 crc kubenswrapper[5113]: I0130 00:10:01.658994 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:01 crc kubenswrapper[5113]: I0130 00:10:01.919620 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/0.log" Jan 30 00:10:01 crc kubenswrapper[5113]: I0130 00:10:01.921538 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"1d7415f782d4736f2923527f4fa6dd1250cb4cb92de205b55ae63eaf3bfbf995"} Jan 30 00:10:01 crc kubenswrapper[5113]: I0130 00:10:01.921792 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:01 crc kubenswrapper[5113]: I0130 00:10:01.922448 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:01 crc kubenswrapper[5113]: I0130 00:10:01.922502 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:01 crc kubenswrapper[5113]: I0130 00:10:01.922515 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:01 crc kubenswrapper[5113]: E0130 00:10:01.922977 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:02 crc kubenswrapper[5113]: I0130 00:10:02.656667 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: 
csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:02 crc kubenswrapper[5113]: I0130 00:10:02.926447 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/1.log" Jan 30 00:10:02 crc kubenswrapper[5113]: I0130 00:10:02.927233 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/0.log" Jan 30 00:10:02 crc kubenswrapper[5113]: I0130 00:10:02.929421 5113 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="1d7415f782d4736f2923527f4fa6dd1250cb4cb92de205b55ae63eaf3bfbf995" exitCode=255 Jan 30 00:10:02 crc kubenswrapper[5113]: I0130 00:10:02.929496 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerDied","Data":"1d7415f782d4736f2923527f4fa6dd1250cb4cb92de205b55ae63eaf3bfbf995"} Jan 30 00:10:02 crc kubenswrapper[5113]: I0130 00:10:02.929654 5113 scope.go:117] "RemoveContainer" containerID="8cced52517a20ca5075db64b94072cb4001674e94a06e701dec72f6a07ab2aa0" Jan 30 00:10:02 crc kubenswrapper[5113]: I0130 00:10:02.929680 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:02 crc kubenswrapper[5113]: I0130 00:10:02.930408 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:02 crc kubenswrapper[5113]: I0130 00:10:02.930512 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:02 crc kubenswrapper[5113]: I0130 00:10:02.930597 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:02 crc kubenswrapper[5113]: E0130 00:10:02.931511 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:02 crc kubenswrapper[5113]: I0130 00:10:02.931938 5113 scope.go:117] "RemoveContainer" containerID="1d7415f782d4736f2923527f4fa6dd1250cb4cb92de205b55ae63eaf3bfbf995" Jan 30 00:10:02 crc kubenswrapper[5113]: E0130 00:10:02.932270 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" Jan 30 00:10:02 crc kubenswrapper[5113]: E0130 00:10:02.937090 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2c57c75ab openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] 
Jan 30 00:10:02 crc kubenswrapper[5113]: E0130 00:10:02.937090 5113 event.go:359] "Server rejected event (will not retry!)" err="events is forbidden: User \"system:anonymous\" cannot create resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2c57c75ab openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:02.932213163 +0000 UTC m=+23.004818540,LastTimestamp:2026-01-30 00:10:02.932213163 +0000 UTC m=+23.004818540,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:03 crc kubenswrapper[5113]: E0130 00:10:03.316031 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Jan 30 00:10:03 crc kubenswrapper[5113]: I0130 00:10:03.657675 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:03 crc kubenswrapper[5113]: I0130 00:10:03.938349 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/1.log"
Jan 30 00:10:03 crc kubenswrapper[5113]: I0130 00:10:03.941241 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:03 crc kubenswrapper[5113]: I0130 00:10:03.942070 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:03 crc kubenswrapper[5113]: I0130 00:10:03.942112 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:03 crc kubenswrapper[5113]: I0130 00:10:03.942124 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:03 crc kubenswrapper[5113]: E0130 00:10:03.942396 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:03 crc kubenswrapper[5113]: I0130 00:10:03.942670 5113 scope.go:117] "RemoveContainer" containerID="1d7415f782d4736f2923527f4fa6dd1250cb4cb92de205b55ae63eaf3bfbf995"
Jan 30 00:10:03 crc kubenswrapper[5113]: E0130 00:10:03.942841 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
Jan 30 00:10:03 crc kubenswrapper[5113]: E0130 00:10:03.950667 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b2c57c75ab\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2c57c75ab openshift-kube-apiserver 0
0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:02.932213163 +0000 UTC m=+23.004818540,LastTimestamp:2026-01-30 00:10:03.942814472 +0000 UTC m=+24.015419849,Count:2,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.142497 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.142863 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.144034 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.144247 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.144451 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:04 crc kubenswrapper[5113]: E0130 00:10:04.145267 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.164140 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.521433 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.521780 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.523107 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.523167 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.523181 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:04 crc kubenswrapper[5113]: E0130 00:10:04.523691 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.528578 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.657100 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource 
"csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.944095 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.944201 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.945161 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.945214 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.945232 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:04 crc kubenswrapper[5113]: E0130 00:10:04.945679 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.946647 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.946683 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:04 crc kubenswrapper[5113]: I0130 00:10:04.946699 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:04 crc kubenswrapper[5113]: E0130 00:10:04.947203 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:05 crc kubenswrapper[5113]: I0130 00:10:05.656425 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:05 crc kubenswrapper[5113]: E0130 00:10:05.880351 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: nodes \"crc\" is forbidden: User \"system:anonymous\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node" Jan 30 00:10:06 crc kubenswrapper[5113]: I0130 00:10:06.659428 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:06 crc kubenswrapper[5113]: I0130 00:10:06.779825 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:06 crc kubenswrapper[5113]: I0130 00:10:06.780726 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:06 crc kubenswrapper[5113]: I0130 00:10:06.780761 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:06 crc kubenswrapper[5113]: I0130 00:10:06.780775 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:06 crc kubenswrapper[5113]: I0130 00:10:06.780801 5113 
kubelet_node_status.go:78] "Attempting to register node" node="crc"
Jan 30 00:10:06 crc kubenswrapper[5113]: E0130 00:10:06.791046 5113 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Jan 30 00:10:07 crc kubenswrapper[5113]: I0130 00:10:07.659213 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:08 crc kubenswrapper[5113]: E0130 00:10:08.001823 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:anonymous\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
Jan 30 00:10:08 crc kubenswrapper[5113]: I0130 00:10:08.658181 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:09 crc kubenswrapper[5113]: I0130 00:10:09.657998 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:09 crc kubenswrapper[5113]: E0130 00:10:09.930901 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
Jan 30 00:10:10 crc kubenswrapper[5113]: E0130 00:10:10.322415 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Jan 30 00:10:10 crc kubenswrapper[5113]: E0130 00:10:10.524032 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: runtimeclasses.node.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"runtimeclasses\" in API group \"node.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass"
Jan 30 00:10:10 crc kubenswrapper[5113]: I0130 00:10:10.657917 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:10 crc kubenswrapper[5113]: E0130 00:10:10.822645 5113 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.512181 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
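Node registration keeps failing at the same step as everything else here: before creating a Node object the kubelet must first GET any existing one, and system:anonymous cannot even read nodes, so the attempt aborts and retries. The same 403 starves the node-lease controller and the informer LISTs logged above. A simplified get-then-create sketch in client-go (the function name and kubeconfig path are assumptions, not the kubelet's actual code):

    package main

    import (
        "context"
        "fmt"

        corev1 "k8s.io/api/core/v1"
        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func registerNode(cs kubernetes.Interface, name string) error {
        // Step 1: look for an existing Node. A 403 here aborts registration,
        // which is the "error getting existing node" failure in the log.
        _, err := cs.CoreV1().Nodes().Get(context.Background(), name, metav1.GetOptions{})
        if err == nil {
            return nil // already registered
        }
        if !apierrors.IsNotFound(err) {
            return fmt.Errorf("error getting existing node: %w", err)
        }
        // Step 2: only a clean NotFound falls through to Create.
        node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: name}}
        _, err = cs.CoreV1().Nodes().Create(context.Background(), node, metav1.CreateOptions{})
        return err
    }

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig")
        if err != nil {
            panic(err)
        }
        if err := registerNode(kubernetes.NewForConfigOrDie(cfg), "crc"); err != nil {
            fmt.Println("register failed, will retry:", err)
        }
    }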
"Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.513712 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.513774 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.513799 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:11 crc kubenswrapper[5113]: E0130 00:10:11.514338 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.514735 5113 scope.go:117] "RemoveContainer" containerID="1d7415f782d4736f2923527f4fa6dd1250cb4cb92de205b55ae63eaf3bfbf995" Jan 30 00:10:11 crc kubenswrapper[5113]: E0130 00:10:11.515033 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" Jan 30 00:10:11 crc kubenswrapper[5113]: E0130 00:10:11.522963 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b2c57c75ab\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2c57c75ab openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:02.932213163 +0000 UTC m=+23.004818540,LastTimestamp:2026-01-30 00:10:11.514985828 +0000 UTC m=+31.587591245,Count:3,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.659378 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.923193 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.962600 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.963261 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.963326 5113 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.963352 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:11 crc kubenswrapper[5113]: E0130 00:10:11.964157 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:11 crc kubenswrapper[5113]: I0130 00:10:11.964647 5113 scope.go:117] "RemoveContainer" containerID="1d7415f782d4736f2923527f4fa6dd1250cb4cb92de205b55ae63eaf3bfbf995"
Jan 30 00:10:11 crc kubenswrapper[5113]: E0130 00:10:11.964957 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
Jan 30 00:10:11 crc kubenswrapper[5113]: E0130 00:10:11.972585 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b2c57c75ab\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2c57c75ab openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:02.932213163 +0000 UTC m=+23.004818540,LastTimestamp:2026-01-30 00:10:11.964900215 +0000 UTC m=+32.037505632,Count:4,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:12 crc kubenswrapper[5113]: I0130 00:10:12.660831 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:13 crc kubenswrapper[5113]: I0130 00:10:13.659475 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:13 crc kubenswrapper[5113]: I0130 00:10:13.791618 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:13 crc kubenswrapper[5113]: I0130 00:10:13.793031 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:13 crc kubenswrapper[5113]: I0130 00:10:13.793129 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:13 crc kubenswrapper[5113]: I0130 00:10:13.793158 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:13 crc kubenswrapper[5113]: I0130 00:10:13.793209 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc"
Jan 30 00:10:13 crc kubenswrapper[5113]: E0130 00:10:13.808842 5113 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Jan 30 00:10:14 crc kubenswrapper[5113]: I0130 00:10:14.659054 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:15 crc kubenswrapper[5113]: I0130 00:10:15.657917 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:16 crc kubenswrapper[5113]: I0130 00:10:16.656768 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:17 crc kubenswrapper[5113]: E0130 00:10:17.332684 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Jan 30 00:10:17 crc kubenswrapper[5113]: I0130 00:10:17.660117 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:18 crc kubenswrapper[5113]: I0130 00:10:18.658147 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:19 crc kubenswrapper[5113]: I0130 00:10:19.656662 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:20 crc kubenswrapper[5113]: I0130 00:10:20.659886 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:20 crc kubenswrapper[5113]: I0130 00:10:20.808984 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:20 crc kubenswrapper[5113]: I0130 00:10:20.810195 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:20 crc kubenswrapper[5113]: I0130 00:10:20.810275 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:20 crc kubenswrapper[5113]: I0130 00:10:20.810299 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:20 crc kubenswrapper[5113]: I0130 00:10:20.810338 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc"
Jan 30 00:10:20 crc kubenswrapper[5113]: E0130 00:10:20.823699 5113 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 30 00:10:20 crc kubenswrapper[5113]: E0130 00:10:20.827068 5113 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Jan 30 00:10:21 crc kubenswrapper[5113]: I0130 00:10:21.660297 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:22 crc kubenswrapper[5113]: I0130 00:10:22.658918 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:23 crc kubenswrapper[5113]: E0130 00:10:23.595781 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"csidrivers\" in API group \"storage.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.CSIDriver"
Jan 30 00:10:23 crc kubenswrapper[5113]: I0130 00:10:23.657883 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:24 crc kubenswrapper[5113]: E0130 00:10:24.342828 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Jan 30 00:10:24 crc kubenswrapper[5113]: I0130 00:10:24.660169 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:25 crc kubenswrapper[5113]: I0130 00:10:25.661005 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:26 crc kubenswrapper[5113]: E0130 00:10:26.380927 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.Service: services is forbidden: User \"system:anonymous\" cannot list resource \"services\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Service"
Jan 30 00:10:26 crc kubenswrapper[5113]: I0130 00:10:26.660000 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:26 crc kubenswrapper[5113]: I0130 00:10:26.772055 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:26 crc kubenswrapper[5113]: I0130 00:10:26.773352 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:26 crc kubenswrapper[5113]: I0130 00:10:26.773591 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:26 crc kubenswrapper[5113]: I0130 00:10:26.773797 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:26 crc kubenswrapper[5113]: E0130 00:10:26.774510 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:26 crc kubenswrapper[5113]: I0130 00:10:26.775111 5113 scope.go:117] "RemoveContainer" containerID="1d7415f782d4736f2923527f4fa6dd1250cb4cb92de205b55ae63eaf3bfbf995"
Jan 30 00:10:26 crc kubenswrapper[5113]: E0130 00:10:26.786707 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59ae5e1612bd\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae5e1612bd openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.017580733 +0000 UTC m=+4.090186110,LastTimestamp:2026-01-30 00:10:26.776454318 +0000 UTC m=+46.849059725,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:26 crc kubenswrapper[5113]: E0130 00:10:26.981742 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59ae6c1b6aa2\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae6c1b6aa2 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Created,Message:Created container: kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.252811938 +0000 UTC m=+4.325417315,LastTimestamp:2026-01-30 00:10:26.975742407 +0000 UTC m=+47.048347794,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:26 crc kubenswrapper[5113]: E0130 00:10:26.993632 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59ae6cce9e25\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59ae6cce9e25 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:Started,Message:Started container kube-apiserver-check-endpoints,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:09:44.264556069 +0000 UTC m=+4.337161446,LastTimestamp:2026-01-30 00:10:26.986993572 +0000 UTC m=+47.059598939,Count:3,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.010517 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/1.log"
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.012368 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"f498a9356df2ed3c16c34d6b0e8d96c2b1fa9a408a2b7d8c7d896cc717d6b8ec"}
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.012681 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.013369 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.013410 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.013428 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:27 crc kubenswrapper[5113]: E0130 00:10:27.013944 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:27 crc kubenswrapper[5113]: E0130 00:10:27.060633 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.RuntimeClass: runtimeclasses.node.k8s.io is forbidden: User \"system:anonymous\" cannot list resource \"runtimeclasses\" in API group \"node.k8s.io\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.RuntimeClass"
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.657832 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.827693 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.829334 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.829408 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.829427 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:27 crc kubenswrapper[5113]: I0130 00:10:27.829925 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc"
Jan 30 00:10:27 crc kubenswrapper[5113]: E0130 00:10:27.847352 5113 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Jan 30 00:10:28 crc kubenswrapper[5113]: I0130 00:10:28.652338 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:29 crc kubenswrapper[5113]: I0130 00:10:29.019470 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/2.log"
Jan 30 00:10:29 crc kubenswrapper[5113]: I0130 00:10:29.020160 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/1.log"
Jan 30 00:10:29 crc kubenswrapper[5113]: I0130 00:10:29.023104 5113 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="f498a9356df2ed3c16c34d6b0e8d96c2b1fa9a408a2b7d8c7d896cc717d6b8ec" exitCode=255
Jan 30 00:10:29 crc kubenswrapper[5113]: I0130 00:10:29.023170 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerDied","Data":"f498a9356df2ed3c16c34d6b0e8d96c2b1fa9a408a2b7d8c7d896cc717d6b8ec"}
Jan 30 00:10:29 crc kubenswrapper[5113]: I0130 00:10:29.023279 5113 scope.go:117] "RemoveContainer" containerID="1d7415f782d4736f2923527f4fa6dd1250cb4cb92de205b55ae63eaf3bfbf995"
Jan 30 00:10:29 crc kubenswrapper[5113]: I0130 00:10:29.023639 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:29 crc kubenswrapper[5113]: I0130 00:10:29.024753 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:29 crc kubenswrapper[5113]: I0130 00:10:29.024835 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:29 crc kubenswrapper[5113]: I0130 00:10:29.025734 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:29 crc kubenswrapper[5113]: E0130 00:10:29.032013 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:29 crc kubenswrapper[5113]: I0130 00:10:29.032571 5113 scope.go:117] "RemoveContainer" containerID="f498a9356df2ed3c16c34d6b0e8d96c2b1fa9a408a2b7d8c7d896cc717d6b8ec"
Jan 30 00:10:29 crc kubenswrapper[5113]: E0130 00:10:29.033052 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
Jan 30 00:10:29 crc kubenswrapper[5113]: E0130 00:10:29.041847 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b2c57c75ab\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2c57c75ab openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:02.932213163 +0000 UTC m=+23.004818540,LastTimestamp:2026-01-30 00:10:29.032978098 +0000 UTC m=+49.105583515,Count:5,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:29 crc kubenswrapper[5113]: E0130 00:10:29.650103 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.Node: nodes \"crc\" is forbidden: User \"system:anonymous\" cannot list resource \"nodes\" in API group \"\" at the cluster scope" logger="UnhandledError" reflector="k8s.io/client-go/informers/factory.go:160" type="*v1.Node"
Jan 30 00:10:29 crc kubenswrapper[5113]: I0130 00:10:29.656184 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:30 crc kubenswrapper[5113]: I0130 00:10:30.028942 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/2.log"
Jan 30 00:10:30 crc kubenswrapper[5113]: I0130 00:10:30.659399 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:30 crc kubenswrapper[5113]: E0130 00:10:30.824297 5113 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 30 00:10:31 crc kubenswrapper[5113]: E0130 00:10:31.351399 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Jan 30 00:10:31 crc kubenswrapper[5113]: I0130 00:10:31.512174 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:10:31 crc kubenswrapper[5113]: I0130 00:10:31.512582 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:31 crc kubenswrapper[5113]: I0130 00:10:31.514435 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:31 crc kubenswrapper[5113]: I0130 00:10:31.514506 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:31 crc kubenswrapper[5113]: I0130 00:10:31.514621 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:31 crc kubenswrapper[5113]: E0130 00:10:31.515214 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:31 crc kubenswrapper[5113]: I0130 00:10:31.515927 5113 scope.go:117] "RemoveContainer" containerID="f498a9356df2ed3c16c34d6b0e8d96c2b1fa9a408a2b7d8c7d896cc717d6b8ec"
Jan 30 00:10:31 crc kubenswrapper[5113]: E0130 00:10:31.516401 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
Jan 30 00:10:31 crc kubenswrapper[5113]: E0130 00:10:31.524495 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b2c57c75ab\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2c57c75ab openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:02.932213163 +0000 UTC m=+23.004818540,LastTimestamp:2026-01-30 00:10:31.516342712 +0000 UTC m=+51.588948129,Count:6,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:31 crc kubenswrapper[5113]: I0130 00:10:31.659630 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:32 crc kubenswrapper[5113]: I0130 00:10:32.660159 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:33 crc kubenswrapper[5113]: I0130 00:10:33.659840 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:34 crc kubenswrapper[5113]: I0130 00:10:34.658929 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:34 crc kubenswrapper[5113]: I0130 00:10:34.848379 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:34 crc kubenswrapper[5113]: I0130 00:10:34.850217 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:34 crc kubenswrapper[5113]: I0130 00:10:34.850286 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:34 crc kubenswrapper[5113]: I0130 00:10:34.850311 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:34 crc kubenswrapper[5113]: I0130 00:10:34.850357 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc"
Jan 30 00:10:34 crc kubenswrapper[5113]: E0130 00:10:34.866314 5113 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Jan 30 00:10:35 crc kubenswrapper[5113]: I0130 00:10:35.659426 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:36 crc kubenswrapper[5113]: I0130 00:10:36.659704 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.013993 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.014325 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.015329 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.015379 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.015399 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:37 crc kubenswrapper[5113]: E0130 00:10:37.015942 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.016365 5113 scope.go:117] "RemoveContainer" containerID="f498a9356df2ed3c16c34d6b0e8d96c2b1fa9a408a2b7d8c7d896cc717d6b8ec"
Jan 30 00:10:37 crc kubenswrapper[5113]: E0130 00:10:37.016705 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
Jan 30 00:10:37 crc kubenswrapper[5113]: E0130 00:10:37.024556 5113 event.go:359] "Server rejected event (will not retry!)" err="events \"kube-apiserver-crc.188f59b2c57c75ab\" is forbidden: User \"system:anonymous\" cannot patch resource \"events\" in API group \"\" in the namespace \"openshift-kube-apiserver\"" event="&Event{ObjectMeta:{kube-apiserver-crc.188f59b2c57c75ab openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-crc,UID:3a14caf222afb62aaabdc47808b6f944,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-apiserver-check-endpoints},},Reason:BackOff,Message:Back-off restarting failed container kube-apiserver-check-endpoints in pod kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944),Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:10:02.932213163 +0000 UTC m=+23.004818540,LastTimestamp:2026-01-30 00:10:37.016654769 +0000 UTC m=+57.089260186,Count:7,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.649618 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.649940 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.651152 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.651224 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.651245 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:37 crc kubenswrapper[5113]: E0130 00:10:37.651834 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:37 crc kubenswrapper[5113]: I0130 00:10:37.658626 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:38 crc kubenswrapper[5113]: E0130 00:10:38.360156 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Jan 30 00:10:38 crc kubenswrapper[5113]: I0130 00:10:38.659378 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:39 crc kubenswrapper[5113]: I0130 00:10:39.659423 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:40 crc kubenswrapper[5113]: I0130 00:10:40.659442 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:40 crc kubenswrapper[5113]: E0130 00:10:40.825050 5113 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Jan 30 00:10:41 crc kubenswrapper[5113]: I0130 00:10:41.659699 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:41 crc kubenswrapper[5113]: I0130 00:10:41.866704 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:41 crc kubenswrapper[5113]: I0130 00:10:41.869204 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:41 crc kubenswrapper[5113]: I0130 00:10:41.869282 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:41 crc kubenswrapper[5113]: I0130 00:10:41.869301 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:41 crc kubenswrapper[5113]: I0130 00:10:41.869696 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc"
Jan 30 00:10:41 crc kubenswrapper[5113]: E0130 00:10:41.878590 5113 kubelet_node_status.go:116] "Unable to register node with API server, error getting existing node" err="nodes \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"nodes\" in API group \"\" at the cluster scope" node="crc"
Jan 30 00:10:42 crc kubenswrapper[5113]: I0130 00:10:42.660512 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:43 crc kubenswrapper[5113]: I0130 00:10:43.659843 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:44 crc kubenswrapper[5113]: I0130 00:10:44.658503 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:45 crc kubenswrapper[5113]: E0130 00:10:45.366891 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="leases.coordination.k8s.io \"crc\" is forbidden: User \"system:anonymous\" cannot get resource \"leases\" in API group \"coordination.k8s.io\" in the namespace \"kube-node-lease\"" interval="7s"
Jan 30 00:10:45 crc kubenswrapper[5113]: I0130 00:10:45.659134 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:46 crc kubenswrapper[5113]: I0130 00:10:46.655645 5113 csi_plugin.go:988] Failed to contact API server when waiting for CSINode publishing: csinodes.storage.k8s.io "crc" is forbidden: User "system:anonymous" cannot get resource "csinodes" in API group "storage.k8s.io" at the cluster scope
Jan 30 00:10:46 crc kubenswrapper[5113]: I0130 00:10:46.930337 5113 csr.go:274] "Certificate signing request is approved, waiting to be issued" logger="kubernetes.io/kube-apiserver-client-kubelet" csr="csr-65ghv"
Jan 30 00:10:46 crc kubenswrapper[5113]: I0130 00:10:46.941703 5113 csr.go:270] "Certificate signing request is issued" logger="kubernetes.io/kube-apiserver-client-kubelet" csr="csr-65ghv"
Jan 30 00:10:46 crc kubenswrapper[5113]: I0130 00:10:46.952313 5113 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Jan 30 00:10:47 crc kubenswrapper[5113]: I0130 00:10:47.456101 5113 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials"
Jan 30 00:10:47 crc kubenswrapper[5113]: I0130 00:10:47.772290 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:47 crc kubenswrapper[5113]: I0130 00:10:47.773407 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:47 crc kubenswrapper[5113]: I0130 00:10:47.773447 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:47 crc kubenswrapper[5113]: I0130 00:10:47.773460 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:47 crc kubenswrapper[5113]: E0130 00:10:47.773858 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:10:47 crc kubenswrapper[5113]: I0130 00:10:47.943278 5113 certificate_manager.go:715] "Certificate rotation deadline determined" logger="kubernetes.io/kube-apiserver-client-kubelet" expiration="2026-03-01 00:05:46 +0000 UTC" deadline="2026-02-25 07:22:00.59971044 +0000 UTC"
Jan 30 00:10:47 crc kubenswrapper[5113]: I0130 00:10:47.943367 5113 certificate_manager.go:431] "Waiting for next certificate rotation" logger="kubernetes.io/kube-apiserver-client-kubelet" sleep="631h11m12.656348891s"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.879216 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.880556 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.880630 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.880643 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.880776 5113 kubelet_node_status.go:78] "Attempting to register node" node="crc"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.892356 5113 kubelet_node_status.go:127] "Node was previously registered" node="crc"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.892860 5113 kubelet_node_status.go:81] "Successfully registered node" node="crc"
Jan 30 00:10:48 crc kubenswrapper[5113]: E0130 00:10:48.892900 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="error getting node \"crc\": node \"crc\" not found"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.897126 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.897196 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.897209 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.897232 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.897245 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:10:48Z","lastTransitionTime":"2026-01-30T00:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:10:48 crc kubenswrapper[5113]: E0130 00:10:48.911492 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400456Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861256Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"99190581-2729-474f-9d8c-bd0a6cbb9243\\\",\\\"systemUUID\\\":\\\"9bcb11f8-bcae-4366-b721-cf94fa126668\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.918864 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.918916 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.918927 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.918944 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.918960 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:10:48Z","lastTransitionTime":"2026-01-30T00:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:10:48 crc kubenswrapper[5113]: E0130 00:10:48.930941 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400456Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861256Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"99190581-2729-474f-9d8c-bd0a6cbb9243\\\",\\\"systemUUID\\\":\\\"9bcb11f8-bcae-4366-b721-cf94fa126668\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.941898 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.941958 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.941972 5113 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.941994 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.942010 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:10:48Z","lastTransitionTime":"2026-01-30T00:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:10:48 crc kubenswrapper[5113]: E0130 00:10:48.953237 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400456Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861256Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"99190581-2729-474f-9d8c-bd0a6cbb9243\\\",\\\"systemUUID\\\":\\\"9bcb11f8-bcae-4366-b721-cf94fa126668\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.961283 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.961340 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.961351 5113 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.961368 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:10:48 crc kubenswrapper[5113]: I0130 00:10:48.961381 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:10:48Z","lastTransitionTime":"2026-01-30T00:10:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:10:48 crc kubenswrapper[5113]: E0130 00:10:48.976176 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400456Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861256Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"99190581-2729-474f-9d8c-bd0a6cbb9243\\\",\\\"systemUUID\\\":\\\"9bcb11f8-bcae-4366-b721-cf94fa126668\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:10:48 crc kubenswrapper[5113]: E0130 00:10:48.976372 5113 kubelet_node_status.go:584] "Unable to update node status" err="update node status exceeds retry count" Jan 30 00:10:48 crc kubenswrapper[5113]: E0130 00:10:48.976409 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:49 crc kubenswrapper[5113]: E0130 00:10:49.077338 5113 
kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:49 crc kubenswrapper[5113]: E0130 00:10:49.178355 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:49 crc kubenswrapper[5113]: E0130 00:10:49.279393 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:49 crc kubenswrapper[5113]: E0130 00:10:49.380502 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:49 crc kubenswrapper[5113]: E0130 00:10:49.480628 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:49 crc kubenswrapper[5113]: E0130 00:10:49.581566 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:49 crc kubenswrapper[5113]: E0130 00:10:49.682553 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:49 crc kubenswrapper[5113]: I0130 00:10:49.772195 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:49 crc kubenswrapper[5113]: I0130 00:10:49.773502 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:49 crc kubenswrapper[5113]: I0130 00:10:49.773589 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:49 crc kubenswrapper[5113]: I0130 00:10:49.773611 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:49 crc kubenswrapper[5113]: E0130 00:10:49.774330 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:49 crc kubenswrapper[5113]: I0130 00:10:49.774731 5113 scope.go:117] "RemoveContainer" containerID="f498a9356df2ed3c16c34d6b0e8d96c2b1fa9a408a2b7d8c7d896cc717d6b8ec" Jan 30 00:10:49 crc kubenswrapper[5113]: E0130 00:10:49.783131 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:49 crc kubenswrapper[5113]: E0130 00:10:49.883215 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:49 crc kubenswrapper[5113]: E0130 00:10:49.984321 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.084824 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:50 crc kubenswrapper[5113]: I0130 00:10:50.090254 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/2.log" Jan 30 00:10:50 crc kubenswrapper[5113]: I0130 00:10:50.092198 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef"} Jan 30 00:10:50 crc kubenswrapper[5113]: I0130 00:10:50.092455 5113 kubelet_node_status.go:413] "Setting node annotation to 
enable volume controller attach/detach" Jan 30 00:10:50 crc kubenswrapper[5113]: I0130 00:10:50.093331 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:50 crc kubenswrapper[5113]: I0130 00:10:50.093381 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:50 crc kubenswrapper[5113]: I0130 00:10:50.093400 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.093995 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.185905 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.286024 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.386890 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.487325 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.587656 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.688350 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.789018 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.825847 5113 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.889856 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:50 crc kubenswrapper[5113]: E0130 00:10:50.990448 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:51 crc kubenswrapper[5113]: E0130 00:10:51.090611 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:51 crc kubenswrapper[5113]: E0130 00:10:51.191501 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:51 crc kubenswrapper[5113]: E0130 00:10:51.292400 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:51 crc kubenswrapper[5113]: E0130 00:10:51.393336 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:51 crc kubenswrapper[5113]: E0130 00:10:51.493456 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:51 crc kubenswrapper[5113]: E0130 00:10:51.593567 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:51 crc kubenswrapper[5113]: E0130 
00:10:51.694086 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:51 crc kubenswrapper[5113]: E0130 00:10:51.794823 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:51 crc kubenswrapper[5113]: E0130 00:10:51.895900 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:51 crc kubenswrapper[5113]: E0130 00:10:51.996966 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:52 crc kubenswrapper[5113]: E0130 00:10:52.097085 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:52 crc kubenswrapper[5113]: I0130 00:10:52.099054 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/3.log" Jan 30 00:10:52 crc kubenswrapper[5113]: I0130 00:10:52.099534 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/2.log" Jan 30 00:10:52 crc kubenswrapper[5113]: I0130 00:10:52.101154 5113 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef" exitCode=255 Jan 30 00:10:52 crc kubenswrapper[5113]: I0130 00:10:52.101214 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerDied","Data":"524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef"} Jan 30 00:10:52 crc kubenswrapper[5113]: I0130 00:10:52.101263 5113 scope.go:117] "RemoveContainer" containerID="f498a9356df2ed3c16c34d6b0e8d96c2b1fa9a408a2b7d8c7d896cc717d6b8ec" Jan 30 00:10:52 crc kubenswrapper[5113]: I0130 00:10:52.101657 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:10:52 crc kubenswrapper[5113]: I0130 00:10:52.102315 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:52 crc kubenswrapper[5113]: I0130 00:10:52.102358 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:52 crc kubenswrapper[5113]: I0130 00:10:52.102410 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:52 crc kubenswrapper[5113]: E0130 00:10:52.102961 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:10:52 crc kubenswrapper[5113]: I0130 00:10:52.103315 5113 scope.go:117] "RemoveContainer" containerID="524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef" Jan 30 00:10:52 crc kubenswrapper[5113]: E0130 00:10:52.103605 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podUID="3a14caf222afb62aaabdc47808b6f944" Jan 30 00:10:52 crc kubenswrapper[5113]: E0130 00:10:52.197474 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:52 crc kubenswrapper[5113]: E0130 00:10:52.298294 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:52 crc kubenswrapper[5113]: E0130 00:10:52.399347 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:52 crc kubenswrapper[5113]: E0130 00:10:52.500472 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:52 crc kubenswrapper[5113]: E0130 00:10:52.601598 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:52 crc kubenswrapper[5113]: E0130 00:10:52.701884 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:52 crc kubenswrapper[5113]: E0130 00:10:52.802196 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:52 crc kubenswrapper[5113]: E0130 00:10:52.902387 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:53 crc kubenswrapper[5113]: E0130 00:10:53.003432 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:53 crc kubenswrapper[5113]: E0130 00:10:53.103538 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:53 crc kubenswrapper[5113]: I0130 00:10:53.105561 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/3.log" Jan 30 00:10:53 crc kubenswrapper[5113]: E0130 00:10:53.204367 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:53 crc kubenswrapper[5113]: E0130 00:10:53.305291 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:53 crc kubenswrapper[5113]: E0130 00:10:53.405385 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:53 crc kubenswrapper[5113]: E0130 00:10:53.506360 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:53 crc kubenswrapper[5113]: E0130 00:10:53.607135 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:53 crc kubenswrapper[5113]: E0130 00:10:53.708228 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:53 crc kubenswrapper[5113]: E0130 00:10:53.809280 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:53 crc kubenswrapper[5113]: E0130 00:10:53.909684 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:54 crc kubenswrapper[5113]: E0130 00:10:54.010228 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:54 crc kubenswrapper[5113]: 
E0130 00:10:54.110760 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:54 crc kubenswrapper[5113]: E0130 00:10:54.211020 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:54 crc kubenswrapper[5113]: E0130 00:10:54.311334 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:54 crc kubenswrapper[5113]: E0130 00:10:54.412188 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:54 crc kubenswrapper[5113]: E0130 00:10:54.516187 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:54 crc kubenswrapper[5113]: E0130 00:10:54.616707 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:54 crc kubenswrapper[5113]: E0130 00:10:54.717142 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:54 crc kubenswrapper[5113]: E0130 00:10:54.818278 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:54 crc kubenswrapper[5113]: E0130 00:10:54.919123 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:55 crc kubenswrapper[5113]: E0130 00:10:55.020363 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:55 crc kubenswrapper[5113]: E0130 00:10:55.121470 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:55 crc kubenswrapper[5113]: E0130 00:10:55.221804 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:55 crc kubenswrapper[5113]: E0130 00:10:55.322044 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:55 crc kubenswrapper[5113]: E0130 00:10:55.423217 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:55 crc kubenswrapper[5113]: E0130 00:10:55.523970 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:55 crc kubenswrapper[5113]: E0130 00:10:55.625040 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:55 crc kubenswrapper[5113]: E0130 00:10:55.725575 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:55 crc kubenswrapper[5113]: E0130 00:10:55.826630 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:55 crc kubenswrapper[5113]: E0130 00:10:55.926854 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:56 crc kubenswrapper[5113]: E0130 00:10:56.027815 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:56 crc kubenswrapper[5113]: E0130 00:10:56.128181 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:56 crc 
kubenswrapper[5113]: E0130 00:10:56.228653 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:56 crc kubenswrapper[5113]: E0130 00:10:56.330017 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:56 crc kubenswrapper[5113]: E0130 00:10:56.430210 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:56 crc kubenswrapper[5113]: E0130 00:10:56.530448 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:56 crc kubenswrapper[5113]: E0130 00:10:56.631809 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:56 crc kubenswrapper[5113]: E0130 00:10:56.732380 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:56 crc kubenswrapper[5113]: E0130 00:10:56.833770 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:56 crc kubenswrapper[5113]: E0130 00:10:56.934599 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:57 crc kubenswrapper[5113]: E0130 00:10:57.035411 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:57 crc kubenswrapper[5113]: E0130 00:10:57.136553 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:57 crc kubenswrapper[5113]: E0130 00:10:57.237427 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:57 crc kubenswrapper[5113]: E0130 00:10:57.338680 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:57 crc kubenswrapper[5113]: E0130 00:10:57.439067 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:57 crc kubenswrapper[5113]: E0130 00:10:57.539378 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:57 crc kubenswrapper[5113]: E0130 00:10:57.640171 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:57 crc kubenswrapper[5113]: E0130 00:10:57.741284 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:57 crc kubenswrapper[5113]: E0130 00:10:57.841594 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:57 crc kubenswrapper[5113]: E0130 00:10:57.942736 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:58 crc kubenswrapper[5113]: E0130 00:10:58.043668 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:58 crc kubenswrapper[5113]: E0130 00:10:58.144286 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:58 crc kubenswrapper[5113]: E0130 00:10:58.245152 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 
30 00:10:58 crc kubenswrapper[5113]: E0130 00:10:58.345603 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:58 crc kubenswrapper[5113]: E0130 00:10:58.446563 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:58 crc kubenswrapper[5113]: E0130 00:10:58.547502 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:58 crc kubenswrapper[5113]: E0130 00:10:58.648719 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:58 crc kubenswrapper[5113]: E0130 00:10:58.749719 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:58 crc kubenswrapper[5113]: E0130 00:10:58.850590 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:58 crc kubenswrapper[5113]: E0130 00:10:58.951099 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:59 crc kubenswrapper[5113]: E0130 00:10:59.052181 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:59 crc kubenswrapper[5113]: E0130 00:10:59.152957 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:10:59 crc kubenswrapper[5113]: E0130 00:10:59.243372 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="error getting node \"crc\": node \"crc\" not found" Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.248062 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.248120 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.248140 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.248164 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.248232 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:10:59Z","lastTransitionTime":"2026-01-30T00:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:10:59 crc kubenswrapper[5113]: E0130 00:10:59.265727 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400456Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861256Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:10:59Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"99190581-2729-474f-9d8c-bd0a6cbb9243\\\",\\\"systemUUID\\\":\\\"9bcb11f8-bcae-4366-b721-cf94fa126668\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.276516 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.276630 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.276652 5113 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.276675 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.276694 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:10:59Z","lastTransitionTime":"2026-01-30T00:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:10:59 crc kubenswrapper[5113]: E0130 00:10:59.292710 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status [... node status patch payload identical to the 00:10:59.265727 attempt above ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.303586 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.303642 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.303662 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.303686 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.303708 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:10:59Z","lastTransitionTime":"2026-01-30T00:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:10:59 crc kubenswrapper[5113]: E0130 00:10:59.319144 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status [... node status patch payload identical to the 00:10:59.265727 attempt above ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.330617 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.330683 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.330709 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.330739 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:10:59 crc kubenswrapper[5113]: I0130 00:10:59.330761 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:10:59Z","lastTransitionTime":"2026-01-30T00:10:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:10:59 crc kubenswrapper[5113]: E0130 00:10:59.345758 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status [... node status patch payload identical to the 00:10:59.265727 attempt above ...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:10:59 crc kubenswrapper[5113]: E0130 00:10:59.346016 5113 kubelet_node_status.go:584] "Unable to update node status" err="update node status exceeds retry count"
Jan 30 00:10:59 crc kubenswrapper[5113]: E0130 00:10:59.346065 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 00:10:59 crc kubenswrapper[5113]: E0130 00:10:59.446392 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
[... "Error getting the current node from lister" repeated at ~100 ms intervals from 00:10:59.547231 through 00:11:00.051724 ...]
Jan 30 00:11:00 crc kubenswrapper[5113]: I0130 00:11:00.093469 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:11:00 crc kubenswrapper[5113]: I0130 00:11:00.094111 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:11:00 crc kubenswrapper[5113]: I0130 00:11:00.095407 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:00 crc kubenswrapper[5113]: I0130 00:11:00.095493 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:00 crc kubenswrapper[5113]: I0130 00:11:00.095518 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:00 crc kubenswrapper[5113]: E0130 00:11:00.096412 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:11:00 crc kubenswrapper[5113]: I0130 00:11:00.096935 5113 scope.go:117] "RemoveContainer" containerID="524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef"
Jan 30 00:11:00 crc kubenswrapper[5113]: E0130 00:11:00.097375 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
[... "Error getting the current node from lister" repeated at ~100 ms intervals from 00:11:00.151824 through 00:11:00.754852 ...]
Jan 30 00:11:00 crc kubenswrapper[5113]: E0130 00:11:00.826490 5113 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
[... "Error getting the current node from lister" repeated at ~100 ms intervals from 00:11:00.855890 through 00:11:01.460812 ...]
Jan 30 00:11:01 crc kubenswrapper[5113]: I0130 00:11:01.511880 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 00:11:01 crc kubenswrapper[5113]: I0130 00:11:01.512247 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:11:01 crc kubenswrapper[5113]: I0130 00:11:01.513499 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:01 crc kubenswrapper[5113]: I0130 00:11:01.513608 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:01 crc kubenswrapper[5113]: I0130 00:11:01.513625 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:01 crc kubenswrapper[5113]: E0130 00:11:01.514232 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
Jan 30 00:11:01 crc kubenswrapper[5113]: I0130 00:11:01.514589 5113 scope.go:117] "RemoveContainer" containerID="524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef"
Jan 30 00:11:01 crc kubenswrapper[5113]: E0130 00:11:01.514870 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944"
[... "Error getting the current node from lister" repeated at ~100 ms intervals from 00:11:01.561137 through 00:11:05.489830 ...]
Jan 30 00:11:05 crc kubenswrapper[5113]: I0130 00:11:05.532718 5113 reflector.go:430] "Caches populated" type="*v1.RuntimeClass" reflector="k8s.io/client-go/informers/factory.go:160"
Jan 30 00:11:05 crc kubenswrapper[5113]: E0130 00:11:05.590644 5113 
kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5113]: E0130 00:11:05.691490 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5113]: E0130 00:11:05.791846 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5113]: E0130 00:11:05.892302 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:05 crc kubenswrapper[5113]: E0130 00:11:05.993269 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5113]: E0130 00:11:06.094258 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5113]: E0130 00:11:06.195027 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5113]: E0130 00:11:06.295284 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5113]: E0130 00:11:06.396403 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5113]: E0130 00:11:06.496749 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5113]: E0130 00:11:06.597620 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5113]: E0130 00:11:06.698385 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5113]: E0130 00:11:06.798881 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:06 crc kubenswrapper[5113]: E0130 00:11:06.899247 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5113]: E0130 00:11:07.000362 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5113]: E0130 00:11:07.100901 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5113]: E0130 00:11:07.201019 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5113]: E0130 00:11:07.301143 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5113]: E0130 00:11:07.401824 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5113]: E0130 00:11:07.502031 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5113]: E0130 00:11:07.603146 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5113]: E0130 
00:11:07.704099 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5113]: E0130 00:11:07.804920 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:07 crc kubenswrapper[5113]: E0130 00:11:07.905798 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5113]: E0130 00:11:08.006991 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5113]: E0130 00:11:08.108076 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5113]: E0130 00:11:08.208852 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5113]: E0130 00:11:08.309821 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5113]: E0130 00:11:08.410275 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5113]: E0130 00:11:08.511258 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5113]: E0130 00:11:08.612291 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5113]: E0130 00:11:08.713012 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5113]: E0130 00:11:08.813222 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:08 crc kubenswrapper[5113]: E0130 00:11:08.913698 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.014204 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.114993 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.216008 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.316395 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.417194 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.517875 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.542191 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="error getting node \"crc\": node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.546657 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 
30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.546703 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.546715 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.546732 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.546744 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:09Z","lastTransitionTime":"2026-01-30T00:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.560771 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400456Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861256Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"99190581-2729-474f-9d8c-bd0a6cbb9243\\\",\\\"systemUUID\\\":\\\"9bcb11f8-bcae-4366-b721-cf94fa126668\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.565012 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.565041 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.565050 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.565063 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.565072 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:09Z","lastTransitionTime":"2026-01-30T00:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.584440 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.584498 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.584516 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.584596 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.584634 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:09Z","lastTransitionTime":"2026-01-30T00:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.602558 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.602627 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.602638 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.602652 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:09 crc kubenswrapper[5113]: I0130 00:11:09.602661 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:09Z","lastTransitionTime":"2026-01-30T00:11:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.612582 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400456Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861256Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"99190581-2729-474f-9d8c-bd0a6cbb9243\\\",\\\"systemUUID\\\":\\\"9bcb11f8-bcae-4366-b721-cf94fa126668\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.612737 5113 kubelet_node_status.go:584] "Unable to update node status" err="update node status exceeds retry count" Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.618569 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:09 crc kubenswrapper[5113]: E0130 00:11:09.719291 5113 
kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found"
[the "Error getting the current node from lister" entry above repeats at ~100 ms intervals from 00:11:09.719 through 00:11:10.727; identical repeats elided]
Jan 30 00:11:10 crc kubenswrapper[5113]: E0130 00:11:10.827504 5113 eviction_manager.go:292] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
[lister errors continue at ~100 ms intervals from 00:11:10.828 through 00:11:11.836]
Jan 30 00:11:11 crc kubenswrapper[5113]: I0130 00:11:11.839805 5113 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160"
[lister errors continue at ~100 ms intervals from 00:11:11.937 through 00:11:12.744]
Jan 30 00:11:12 crc kubenswrapper[5113]: I0130 00:11:12.772921 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach"
Jan 30 00:11:12 crc kubenswrapper[5113]: I0130 00:11:12.773998 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:12 crc kubenswrapper[5113]: I0130 00:11:12.774029 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:12 crc kubenswrapper[5113]: I0130 00:11:12.774041 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:12 crc kubenswrapper[5113]: E0130 00:11:12.774502 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc"
[lister errors continue at ~100 ms intervals from 00:11:12.844 through 00:11:13.045]
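The run above is the kubelet idling while the Node object is still unreadable: every status sync asks the local node lister for "crc" and fails, and the 00:11:09.612737 entry ("update node status exceeds retry count") shows the status updater exhausting its fixed retry budget each cycle. A minimal sketch of that bounded retry, assuming the kubelet's nodeStatusUpdateRetry constant (5 in the kubelet sources), with tryUpdateNodeStatus as a stand-in for the real status patch:

    // Minimal sketch of the bounded retry behind "update node status exceeds
    // retry count". nodeStatusUpdateRetry mirrors the kubelet constant (5);
    // tryUpdateNodeStatus stands in for the real PATCH of the Node status
    // subresource, which in this log keeps failing with the webhook dial error.
    package main

    import (
        "errors"
        "fmt"
    )

    const nodeStatusUpdateRetry = 5 // kubelet gives up after this many tries per cycle

    func tryUpdateNodeStatus() error {
        // Stand-in for the real call; here it always fails the way the log does.
        return errors.New(`Post "https://127.0.0.1:9743/node?timeout=10s": connect: connection refused`)
    }

    func updateNodeStatus() error {
        for i := 0; i < nodeStatusUpdateRetry; i++ {
            err := tryUpdateNodeStatus()
            if err == nil {
                return nil
            }
            fmt.Printf("Error updating node status, will retry: %v\n", err)
        }
        return errors.New("update node status exceeds retry count")
    }

    func main() {
        if err := updateNodeStatus(); err != nil {
            fmt.Println("Unable to update node status:", err)
        }
    }

With the default nodeStatusUpdateFrequency of 10 s, the next full attempt lands roughly ten seconds later, which matches the retries visible below at 00:11:19.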
from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5113]: E0130 00:11:13.247071 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5113]: E0130 00:11:13.348037 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5113]: E0130 00:11:13.449057 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5113]: E0130 00:11:13.549370 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5113]: E0130 00:11:13.649597 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5113]: I0130 00:11:13.699020 5113 reflector.go:430] "Caches populated" type="*v1.CSIDriver" reflector="k8s.io/client-go/informers/factory.go:160" Jan 30 00:11:13 crc kubenswrapper[5113]: E0130 00:11:13.750806 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5113]: E0130 00:11:13.851587 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:13 crc kubenswrapper[5113]: E0130 00:11:13.952782 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.053947 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.154557 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.255472 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.356460 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.456663 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.557784 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.658014 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.759057 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5113]: I0130 00:11:14.772683 5113 kubelet_node_status.go:413] "Setting node annotation to enable volume controller attach/detach" Jan 30 00:11:14 crc kubenswrapper[5113]: I0130 00:11:14.774096 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:14 crc kubenswrapper[5113]: I0130 00:11:14.774153 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:14 crc kubenswrapper[5113]: I0130 00:11:14.774166 5113 kubelet_node_status.go:736] 
"Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.774805 5113 kubelet.go:3336] "No need to create a mirror pod, since failed to get node info from the cluster" err="node \"crc\" not found" node="crc" Jan 30 00:11:14 crc kubenswrapper[5113]: I0130 00:11:14.775091 5113 scope.go:117] "RemoveContainer" containerID="524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.775353 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.860091 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:14 crc kubenswrapper[5113]: E0130 00:11:14.960249 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:15 crc kubenswrapper[5113]: E0130 00:11:15.060724 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:15 crc kubenswrapper[5113]: E0130 00:11:15.161180 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:15 crc kubenswrapper[5113]: E0130 00:11:15.261618 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:15 crc kubenswrapper[5113]: E0130 00:11:15.362332 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:15 crc kubenswrapper[5113]: E0130 00:11:15.463694 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:15 crc kubenswrapper[5113]: E0130 00:11:15.564092 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:15 crc kubenswrapper[5113]: E0130 00:11:15.665236 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:15 crc kubenswrapper[5113]: E0130 00:11:15.766153 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:15 crc kubenswrapper[5113]: E0130 00:11:15.866608 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:15 crc kubenswrapper[5113]: E0130 00:11:15.967455 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:16 crc kubenswrapper[5113]: E0130 00:11:16.067608 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:16 crc kubenswrapper[5113]: E0130 00:11:16.168386 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:16 crc kubenswrapper[5113]: E0130 00:11:16.269501 5113 kubelet_node_status.go:515] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 00:11:16 crc kubenswrapper[5113]: E0130 00:11:16.370696 5113 
[lister errors continue at ~100 ms intervals from 00:11:14.860 through 00:11:19.696]
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.769956 5113 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.799844 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.799885 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.799895 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.799913 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.799925 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:19Z","lastTransitionTime":"2026-01-30T00:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
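This is the first heartbeat after the kubelet's Node informer finally syncs ("Caches populated" type="*v1.Node"), so status reporting can proceed; Ready stays False only because the runtime's network check fails. The runtime keeps reporting NetworkReady=false until a CNI configuration shows up in the directory named in the message. A quick probe of the same condition; the path is taken from the log, and the extension set is an assumption modeled on CRI-O's ocicni watcher:

    // Probe for the condition behind NetworkReady=false: the runtime's CNI
    // watcher needs at least one config file in this directory. Path from the
    // log message; *.conf/*.conflist/*.json is the assumed accepted set.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        dir := "/etc/kubernetes/cni/net.d"
        var found []string
        for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
            matches, _ := filepath.Glob(filepath.Join(dir, pat))
            found = append(found, matches...)
        }
        if len(found) == 0 {
            fmt.Fprintf(os.Stderr, "no CNI configuration file in %s - network plugin not ready\n", dir)
            os.Exit(1)
        }
        fmt.Println("CNI config present:", found)
    }

On OpenShift the file normally appears once the cluster network operator's pods come up on the node, at which point the Ready condition flips on the next heartbeat.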
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.801943 5113 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.818897 5113 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.901991 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.902042 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.902053 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.902071 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.902084 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:19Z","lastTransitionTime":"2026-01-30T00:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.919998 5113 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-etcd/etcd-crc"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.922175 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.922211 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.922221 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.922235 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.922247 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:19Z","lastTransitionTime":"2026-01-30T00:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
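With the Node now visible, the kubelet immediately tries to patch nodes/crc/status, and every attempt below dies on the same dial error: the API server must consult the admission webhook node.network-node-identity.openshift.io, served on this host at 127.0.0.1:9743 (apparently by OVN-Kubernetes' network-node-identity component, which has evidently not started), and nothing is listening. A plain TCP probe reproduces the exact failure:

    // Reproduce the dial failure from the webhook call. 127.0.0.1:9743 is the
    // URL the API server reports in the error below; nothing on the node is
    // listening there yet.
    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        conn, err := net.DialTimeout("tcp", "127.0.0.1:9743", 2*time.Second)
        if err != nil {
            fmt.Println(err) // dial tcp 127.0.0.1:9743: connect: connection refused
            return
        }
        conn.Close()
        fmt.Println("webhook endpoint is listening")
    }

Until that webhook answers (or its failurePolicy lets requests through), every status patch is rejected and the node stays NotReady regardless of the kubelet's own health.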
Jan 30 00:11:19 crc kubenswrapper[5113]: E0130 00:11:19.932396 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status [status patch payload elided: identical to the 00:11:09 attempt above except for the 2026-01-30T00:11:19Z condition timestamps] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.935712 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.935764 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.935781 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.935807 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.935826 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:19Z","lastTransitionTime":"2026-01-30T00:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:19 crc kubenswrapper[5113]: E0130 00:11:19.947195 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status [status patch payload elided: identical to the attempt at 00:11:19.932396] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.950480 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.950513 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.950527 5113
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.950554 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.950565 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:19Z","lastTransitionTime":"2026-01-30T00:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:19 crc kubenswrapper[5113]: E0130 00:11:19.960632 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32400456Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32861256Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T00:11:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c8a088031661d94022418e93fb63744c38e1c4cff93ea3b95c096a290c2b7a3\\\"],\\\"sizeBytes\\\":2981840865},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\"],\\\"sizeBytes\\\":1641503854},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:286bb0beab328954b0a86b7f066fd5a843b462d6acb2812df7ec788015cd32d4\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:be02784ed82978c399102be1c6c9f2ca441be4d984e0fd7100c155dd4417ebbf\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1597684406},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\"],\\\"sizeBytes\\\":1261384762},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:0d50962980a5aeecae2d99c98913fb0f46940164e41de0af2ba0e3dafe0d9017\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:8d607fb6cc75ca36bca1e0a9c5bea5d1919b75db20733df69c64c8a10ee8083d\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1224304325},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:541db5b20a3d2199602b3b5ac80f09ea31498034e9ae3841238b03a39150f0d7\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:a4c5df55584cba56f00004a090923a5c6de2071add5eb1672a5e20aa646aad8c\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.20\\\"],\\\"sizeBytes\\\":1126957757},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:b1c859067d6b7b785ab4977ed7137c5b3bb257234f7d7737a1d2836cef1576b5\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:df08951924aa23b2333436a1d04b2dba56c366bb4f09d39ae3aedb980e4fb909\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.20\\\"],\\\"sizeBytes\\\":1079537324},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\"],\\\"sizeBytes\\\":1052707833},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d1a1e4abe0326c3af89e9eaa4b7449dd2d5b6f9403c677e19b00b24947b1df9\\\"],\\\"sizeBytes\\\":989392005},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b2b1fc3d5bb4944cbd5b23b87566d7ba24b1b66f5a0465f76bcc05023191cc47\\\"],\\\"sizeBytes\\\":971668163},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\"],\\\"sizeBytes\\\":969078739},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3d6c8802ae53d6aecf38aa7b560d7892193806bdeb3d7c1637fac77c47fd1f\\\"],\\\"sizeBytes\\\":876488654},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\"],\\\"sizeBytes\\\":847332502},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:36c4867005702f0c4cbfcfa33f18a98596a6c9b1340b633c85ccef84a0c4f889\\\"],\\\"sizeBytes\\\":769516783},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\"],\\\"sizeBytes\\\":721591926},{\\\"name
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"99190581-2729-474f-9d8c-bd0a6cbb9243\\\",\\\"systemUUID\\\":\\\"9bcb11f8-bcae-4366-b721-cf94fa126668\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.963768 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.963810 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.963822 5113 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.963842 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.963856 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:19Z","lastTransitionTime":"2026-01-30T00:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:19 crc kubenswrapper[5113]: E0130 00:11:19.977131 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{…}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
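Each of these retry entries fails at the same step: the kubelet's node-status PATCH is routed through the node.network-node-identity.openshift.io admission webhook, and the POST to https://127.0.0.1:9743/node is refused because nothing is listening on that port yet. Below is a minimal Go probe of just the dial step; it is a hypothetical standalone triage helper, not kubelet code, and the address is taken verbatim from the log.

// probe_webhook.go: reproduces the dial behind
// "dial tcp 127.0.0.1:9743: connect: connection refused".
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Address from the webhook URL in the log: https://127.0.0.1:9743/node?timeout=10s
	conn, err := net.DialTimeout("tcp", "127.0.0.1:9743", 2*time.Second)
	if err != nil {
		// Expected while the network-node-identity webhook server is down.
		fmt.Println("webhook endpoint unreachable:", err)
		return
	}
	conn.Close()
	fmt.Println("webhook endpoint is accepting TCP connections")
}

Once the network-node-identity webhook server starts listening on 9743, the dial succeeds and the status patches should go through.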
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"99190581-2729-474f-9d8c-bd0a6cbb9243\\\",\\\"systemUUID\\\":\\\"9bcb11f8-bcae-4366-b721-cf94fa126668\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.981157 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.981205 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.981219 5113 
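Every NodeNotReady heartbeat in this stretch carries one root message: the container runtime reports NetworkReady=false because no CNI configuration file exists in /etc/kubernetes/cni/net.d/; that file presumably appears once the ovnkube-node pod writes its network config. A rough Go sketch of the directory check the message implies follows; it is illustrative only, the real readiness logic lives in the container runtime, and the glob patterns are assumptions.

// cni_check.go: looks for CNI config files the way the error message implies.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	var found []string
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, _ := filepath.Glob(filepath.Join(confDir, pat))
		found = append(found, matches...)
	}
	if len(found) == 0 {
		fmt.Fprintln(os.Stderr, "no CNI configuration file found; network plugin not ready")
		os.Exit(1)
	}
	fmt.Println("CNI config present:", found)
}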
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.981219 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.981239 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:19 crc kubenswrapper[5113]: I0130 00:11:19.981253 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:19Z","lastTransitionTime":"2026-01-30T00:11:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:19 crc kubenswrapper[5113]: E0130 00:11:19.993218 5113 kubelet_node_status.go:597] "Error updating node status, will retry" err="failed to patch status \"{…}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:11:19 crc kubenswrapper[5113]: E0130 00:11:19.993920 5113 kubelet_node_status.go:584] "Unable to update node status" err="update node status exceeds retry count"
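The shape just above, a run of "Error updating node status, will retry" entries capped by "update node status exceeds retry count", reflects the kubelet's bounded retry loop around the status patch (nodeStatusUpdateRetry in the kubelet sources). The sketch below imitates that shape only; the constant's value and the always-failing update function are assumptions for illustration, not the kubelet's implementation.

// retry_sketch.go: bounded-retry shape matching the log messages.
package main

import (
	"errors"
	"fmt"
)

// nodeStatusUpdateRetry mirrors the kubelet constant of the same name;
// the value 5 is an assumption for this sketch.
const nodeStatusUpdateRetry = 5

// tryUpdateNodeStatus stands in for the PATCH that the admission webhook
// rejects while 127.0.0.1:9743 is unreachable.
func tryUpdateNodeStatus() error {
	return errors.New("connect: connection refused")
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}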
s\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\"],\\\"sizeBytes\\\":646867625},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\"],\\\"sizeBytes\\\":638910445},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae245c97fc463e876c3024efb806fa8f4efb13b3f06f1bdd3e7e1447f5a5dce4\\\"],\\\"sizeBytes\\\":617699779},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4926e304011637ca9df370a193896d685f0f3ffabbec234ec827abdbeb083f9\\\"],\\\"sizeBytes\\\":607756695},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\"],\\\"sizeBytes\\\":584721741},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\"],\\\"sizeBytes\\\":545674969},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:574d49b89604b8e8103abf57feee77812fe8cf441eafc17fdff95d57ca80645e\\\"],\\\"sizeBytes\\\":542463064},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\"],\\\"sizeBytes\\\":539380592},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9506bdcf97d5200cf2cf4cdf110aebafdd141a24f6589bf1e1cfe27bb7fc1ed2\\\"],\\\"sizeBytes\\\":533027808},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\"],\\\"sizeBytes\\\":528200501},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\"],\\\"sizeBytes\\\":527774342},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e5e8108294b086fdb797365e5a46badba9b3d866bdcddc8460a51e05a253753d\\\"],\\\"sizeBytes\\\":526632426},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5827f6ae3beb4853192e02cc18890467bd251b33070f36f9a105991e7e6d3c9b\\\"],\\\"sizeBytes\\\":522490210},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:66c8fe5d45ff249643dae75185dd2787ea1b0ae87d5699a8222149c07689557c\\\"],\\\"sizeBytes\\\":520141094},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:baf975b6944f2844860c440636e0d4b80b2fdc473d30f32ae7d6989f2fc2b135\\\"],\\\"sizeBytes\\\":519815758},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:765cf9c3ebf4df049ebc022beaaf52f52852cf89fb802034536ad91dd45db807\\\"],\\\"sizeBytes\\\":519539350},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:52e442bc8198ac925caff87ddd35b3107b7375d5afc9c2eb041ca4e79db72c6f\\\"],\\\"sizeBytes\\\":518690683},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:43b0e0b7e1955ee905e48799a62f50b8a8df553190415ce1f5550375c2507ca5\\\"],\\\"sizeBytes\\\":518251952},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:977a316fa3598eb575a4477dafc09bbf06fad21c4ec2867052225d74f2a9f366\\\"],\\\"sizeBytes\\\":511136541},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\"],\\\"sizeBytes\\\":510122097},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:dbd8603d717c26901bcf9731b1e0392ae4bc08a270ed1eeb45839e44bed9607d\\\"],\\\"sizeBytes\\\":508941917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\"],\\\"sizeBytes\\\":508318343},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7a726c68cebc9b08edd734a8bae5150ae5950f7734fe9b9c2a6e0d06f21cc095\\\"],\\\"sizeBytes\\\":498380948},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:82501261b9c63012ba3b83fe4d6703c0af5eb9c9151670eb90ae480b9507d761\\\"],\\\"sizeBytes\\\":497232440},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:4e4239621caed0b0d9132d167403631e9af86be9a395977f013e201ead281bb4\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:c0b1bec73fdb6853eb3bd9e9733aee2d760ca09a33cfd94adf9ab7b706e83fa9\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":491224335},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0f7abf2f97afd1127d9245d764338c6047bac1711b2cee43112570a85946360\\\"],\\\"sizeBytes\\\":490381192},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:21b12ff0c81c1d535e7c31aff3a73b1e9ca763e5f88037f59ade0dfab6ed8946\\\"],\\\"sizeBytes\\\":482632652},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:036ed6efe4cb5f5b90ee7f9ef5297c8591b8d67aa36b3c58b4fc5417622a140c\\\"],\\\"sizeBytes\\\":477561861},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0fe5a041a2b99d736e82f1b4a6cd9792c5e23ded475e9f0742cd19234070f989\\\"],\\\"sizeBytes\\\":475327956},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\"],\\\"sizeBytes\\\":475137830},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2599f32933f5fea6066ede54ad8f6150adb7bd9067892f251d5913121d5c630d\\\"],\\\"sizeBytes\\\":472771950},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651bbe9d418f49c2c889d731df67cf5d88dff59dc03f5a1b5d4c8bb3ae001f1a\\\"],\\\"sizeBytes\\\":469976318},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4fe612a1572df462d6a4b664a10bc2e6cad239648acbf8c0303f8fca5d2596c0\\\"],\\\"sizeBytes\\\":468393024},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a5bb05344dd2296077f5066e908ede0eea23f5a12fb78ef86a9513c88d3faaca\\\"],\\\"sizeBytes\\\":464375011},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\"],\\\"sizeBytes\\\":462844959}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"99190581-2729-474f-9d8c-bd0a6cbb9243\\\",\\\"systemUUID\\\":\\\"9bcb11f8-bcae-4366-b721-cf94fa126668\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:19 crc kubenswrapper[5113]: E0130 00:11:19.993920 5113 kubelet_node_status.go:584] "Unable to update node status" err="update node status exceeds retry count" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.004978 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.005188 5113 
kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.005401 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.005806 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.005958 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:20Z","lastTransitionTime":"2026-01-30T00:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.021009 5113 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.108992 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.109057 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.109069 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.109092 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.109107 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:20Z","lastTransitionTime":"2026-01-30T00:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.120762 5113 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.212085 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.212142 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.212161 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.212187 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.212206 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:20Z","lastTransitionTime":"2026-01-30T00:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.314816 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.314886 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.314905 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.314929 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.314950 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:20Z","lastTransitionTime":"2026-01-30T00:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.417610 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.417679 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.417698 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.417721 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.417739 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:20Z","lastTransitionTime":"2026-01-30T00:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.520307 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.520737 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.520898 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.521112 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.521335 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:20Z","lastTransitionTime":"2026-01-30T00:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.624034 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.624407 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.624603 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.624822 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.624985 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:20Z","lastTransitionTime":"2026-01-30T00:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.694463 5113 apiserver.go:52] "Watching apiserver" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.704215 5113 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="pkg/kubelet/config/apiserver.go:66" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.704975 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-machine-config-operator/machine-config-daemon-gxph5","openshift-multus/multus-additional-cni-plugins-t4r5k","openshift-multus/multus-mbd62","openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6","openshift-kube-apiserver/kube-apiserver-crc","openshift-multus/network-metrics-daemon-qx4gj","openshift-network-diagnostics/network-check-target-fhkjl","openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx","openshift-dns/node-resolver-67v4x","openshift-etcd/etcd-crc","openshift-image-registry/node-ca-4q767","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-network-node-identity/network-node-identity-dgvkt","openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv","openshift-ovn-kubernetes/ovnkube-node-724qr","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5","openshift-network-operator/iptables-alerter-5jnd7"] Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.706173 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.712148 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.715618 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-operator\"/\"metrics-tls\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.715829 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.714239 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.715974 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.716292 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.719376 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"kube-root-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.719669 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"openshift-service-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.719699 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-node-identity\"/\"network-node-identity-cert\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.719563 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"env-overrides\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.720029 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"ovnkube-identity-cm\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.721288 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.722701 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.722857 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.723464 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"iptables-alerter-script\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.727833 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.728274 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.728289 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.728315 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.728338 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:20Z","lastTransitionTime":"2026-01-30T00:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.735983 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.749760 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.749866 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.749889 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.750006 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qx4gj" podUID="d655d34c-2969-43f2-8e93-455507c7cfda" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.750164 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17b87002-b798-480a-8e17-83053d698239\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gwt8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-fhkjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.766310 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.771704 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.774136 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"openshift-service-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.774508 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"kube-root-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.774958 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"multus-daemon-config\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.775074 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"default-dockercfg-g6kgg\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.775112 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"cni-copy-resources\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.784037 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f863fff9-286a-45fa-b8f0-8a86994b8440\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l7w75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5bb8f5cd97-xdvz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.792544 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.794763 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-ancillary-tools-dockercfg-nwglk\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.794767 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"whereabouts-flatfile-config\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.794774 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"default-cni-sysctl-allowlist\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.802389 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc4541ce-7789-4670-bc75-5c2868e52ce0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-dgvkt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.816103 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-67v4x" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.816409 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dsgwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-5jnd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.818921 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"node-resolver-dockercfg-tk7bt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.819198 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"openshift-service-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.819349 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"kube-root-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.826550 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.827047 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fbdfe828b092b23e6d4480daf3e0216aada6debaf1ef1b314a0a31e73ebf13c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-5ff7774fd9-nljh6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.828461 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-node-metrics-cert\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.828997 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"ovnkube-config\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.830033 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"env-overrides\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.830129 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"kube-root-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.830201 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-node-dockercfg-l2v2m\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.830826 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.830882 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.830897 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.830916 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.830931 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:20Z","lastTransitionTime":"2026-01-30T00:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.831872 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"openshift-service-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.833650 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"ovnkube-script-lib\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.842959 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dsgwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-5jnd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.844343 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-4q767" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.857001 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"image-registry-certificates\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.857010 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"openshift-service-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.857602 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"node-ca-dockercfg-tjs74\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.857926 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"kube-root-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.867037 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-mbd62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2f6lr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mbd62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.878996 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.881024 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\
\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6efa070ceb93cc5fc2e76eab6d9c96ac3c4f8812085d0b6eb6e3f513b5bac782\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3454e762466e22e2a893650b9781823558bc6fdfda2aa4188aff3cb819014c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc175
3b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/etc/whereabouts/config\\\",\\\"name\\\":\\\"whereabouts-flatfile-configmap\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-t4r5k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.884618 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"kube-rbac-proxy\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.884706 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-daemon-dockercfg-w9nzh\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.884630 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.884967 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"proxy-tls\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.886747 5113 scope.go:117] "RemoveContainer" containerID="524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.887227 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.887387 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.888148 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.889267 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-control-plane-metrics-cert\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.889827 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-control-plane-dockercfg-nl8tp\"" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896355 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/34177974-8d82-49d2-a763-391d0df3bbd8-host-etc-kube\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896438 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-hostroot\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896483 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g27c2\" (UniqueName: \"kubernetes.io/projected/d9239213-5213-4f95-9acf-9d99c18c3f5a-kube-api-access-g27c2\") pod \"node-resolver-67v4x\" (UID: \"d9239213-5213-4f95-9acf-9d99c18c3f5a\") " pod="openshift-dns/node-resolver-67v4x" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896545 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-cnibin\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896582 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-run-k8s-cni-cncf-io\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896614 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-run-netns\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " 
pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896645 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-conf-dir\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896681 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-cni-binary-copy\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896712 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-socket-dir-parent\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896748 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-etc-kubernetes\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896781 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-run-multus-certs\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896857 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-var-lib-cni-bin\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896914 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58vql\" (UniqueName: \"kubernetes.io/projected/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-kube-api-access-58vql\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896952 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d9239213-5213-4f95-9acf-9d99c18c3f5a-hosts-file\") pod \"node-resolver-67v4x\" (UID: \"d9239213-5213-4f95-9acf-9d99c18c3f5a\") " pod="openshift-dns/node-resolver-67v4x" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.896921 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-dns/node-resolver-67v4x" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9239213-5213-4f95-9acf-9d99c18c3f5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g27c2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-67v4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897019 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-m7xz2\" (UniqueName: \"kubernetes.io/projected/34177974-8d82-49d2-a763-391d0df3bbd8-kube-api-access-m7xz2\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897160 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-os-release\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897182 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-var-lib-kubelet\") pod \"multus-mbd62\" (UID: 
\"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897206 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-iptables-alerter-script\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897354 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-system-cni-dir\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897397 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f6lr\" (UniqueName: \"kubernetes.io/projected/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-kube-api-access-2f6lr\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897427 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-cnibin\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897455 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d9239213-5213-4f95-9acf-9d99c18c3f5a-tmp-dir\") pod \"node-resolver-67v4x\" (UID: \"d9239213-5213-4f95-9acf-9d99c18c3f5a\") " pod="openshift-dns/node-resolver-67v4x" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897499 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897598 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-cni-dir\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897633 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-var-lib-cni-multus\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897670 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-host-slash\") pod 
\"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897703 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897775 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-os-release\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897818 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fc4541ce-7789-4670-bc75-5c2868e52ce0-webhook-cert\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897855 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/34177974-8d82-49d2-a763-391d0df3bbd8-metrics-tls\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897889 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.897962 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-cni-binary-copy\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.898003 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-ovnkube-identity-cm\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.898043 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-system-cni-dir\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:20 crc 
kubenswrapper[5113]: I0130 00:11:20.898082 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"whereabouts-flatfile-configmap\" (UniqueName: \"kubernetes.io/configmap/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-whereabouts-flatfile-configmap\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.898118 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-daemon-config\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.898154 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-dsgwk\" (UniqueName: \"kubernetes.io/projected/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-kube-api-access-dsgwk\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.898192 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.898225 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-tuning-conf-dir\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.898263 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.898369 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lldsn\" (UniqueName: \"kubernetes.io/projected/d655d34c-2969-43f2-8e93-455507c7cfda-kube-api-access-lldsn\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.898538 5113 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.900168 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-env-overrides\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " 
pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.900265 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-8nt2j\" (UniqueName: \"kubernetes.io/projected/fc4541ce-7789-4670-bc75-5c2868e52ce0-kube-api-access-8nt2j\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.900316 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.900496 5113 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.900646 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:21.400613976 +0000 UTC m=+101.473219373 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.901031 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-env-overrides\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.901108 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:21.401090431 +0000 UTC m=+101.473695828 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.904165 5113 desired_state_of_world_populator.go:158] "Finished populating initial desired state of world" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.907058 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/fc4541ce-7789-4670-bc75-5c2868e52ce0-ovnkube-identity-cm\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.909967 5113 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.910283 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-iptables-alerter-script\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.910686 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.910686 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.912508 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.912644 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.912722 5113 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
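The kube-api-access-l7w75 failure above shows how a projected service-account volume is assembled from several sources (here the kube-root-ca.crt and openshift-service-ca.crt configmaps, alongside the token): projected.go collects an error per unresolved source and the volume fails as a whole, which is why both "not registered" objects are listed together. A toy version of that all-or-nothing assembly, with an in-memory map standing in for the kubelet's per-pod object cache; all names are invented for illustration:

    // projected_sketch.go -- illustrates why one unresolved source fails the
    // whole projected volume; the registry map is a stand-in for the cache
    // that the log's "not registered" errors refer to.
    package main

    import (
        "errors"
        "fmt"
    )

    var registry = map[string][]byte{
        // "openshift-network-diagnostics/kube-root-ca.crt" is deliberately absent.
        "openshift-network-diagnostics/openshift-service-ca.crt": []byte("ca-bundle-pem"),
    }

    // buildProjected gathers every source; like projected.go, it aggregates
    // all failures instead of stopping at the first one.
    func buildProjected(sources []string) (map[string][]byte, error) {
        out := map[string][]byte{}
        var errs []error
        for _, s := range sources {
            data, ok := registry[s]
            if !ok {
                errs = append(errs, fmt.Errorf("object %q not registered", s))
                continue
            }
            out[s] = data
        }
        if len(errs) > 0 {
            return nil, errors.Join(errs...) // the volume is not set up at all
        }
        return out, nil
    }

    func main() {
        _, err := buildProjected([]string{
            "openshift-network-diagnostics/kube-root-ca.crt",
            "openshift-network-diagnostics/openshift-service-ca.crt",
        })
        fmt.Println(err)
    }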
Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.917626 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsgwk\" (UniqueName: \"kubernetes.io/projected/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-kube-api-access-dsgwk\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.917686 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fc4541ce-7789-4670-bc75-5c2868e52ce0-webhook-cert\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.917641 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7xz2\" (UniqueName: \"kubernetes.io/projected/34177974-8d82-49d2-a763-391d0df3bbd8-kube-api-access-m7xz2\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.917927 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/34177974-8d82-49d2-a763-391d0df3bbd8-metrics-tls\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.919463 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.919493 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.919511 5113 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:20 crc kubenswrapper[5113]: E0130 00:11:20.919654 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:21.419607327 +0000 UTC m=+101.492212724 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.920741 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nt2j\" (UniqueName: \"kubernetes.io/projected/fc4541ce-7789-4670-bc75-5c2868e52ce0-kube-api-access-8nt2j\") pod \"network-node-identity-dgvkt\" (UID: \"fc4541ce-7789-4670-bc75-5c2868e52ce0\") " pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.921399 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f863fff9-286a-45fa-b8f0-8a86994b8440\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l7w75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5bb8f5cd97-xdvz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.930140 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc4541ce-7789-4670-bc75-5c2868e52ce0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-dgvkt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.934711 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.934742 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.934751 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.934767 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.934778 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:20Z","lastTransitionTime":"2026-01-30T00:11:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.937341 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qx4gj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d655d34c-2969-43f2-8e93-455507c7cfda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lldsn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:49b34ce0d25eec7a6077f4bf21bf7d4e64e598d28785a20b9ee3594423b7de14\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lldsn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qx4gj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.946178 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" err="failed to patch status 
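This is the pivotal entry of the section: the node flips to Ready=False with reason KubeletNotReady because the container runtime reports NetworkReady=false until a CNI configuration file appears in /etc/kubernetes/cni/net.d/. It also explains the recurring "connection refused" status-patch failures around it: the pod.network-node-identity.openshift.io webhook at 127.0.0.1:9743 is served by one of the very pods that cannot start before networking comes up. A toy readiness probe over the conf directory, mirroring only the observable check; the glob patterns are assumptions, not CRI-O or kubelet source:

    // cni_probe_sketch.go -- a toy version of the network-readiness check
    // implied by "no CNI configuration file in /etc/kubernetes/cni/net.d/".
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // networkReady reports true once at least one CNI conf file is present
    // in confDir, and otherwise returns the reason the node stays NotReady.
    func networkReady(confDir string) (bool, string) {
        for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
            matches, _ := filepath.Glob(filepath.Join(confDir, pat))
            if len(matches) > 0 {
                return true, ""
            }
        }
        return false, "no CNI configuration file in " + confDir
    }

    func main() {
        ready, msg := networkReady("/etc/kubernetes/cni/net.d")
        if !ready {
            fmt.Println("NodeNotReady:", msg)
            os.Exit(1)
        }
        fmt.Println("NetworkReady=true")
    }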
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"17b87002-b798-480a-8e17-83053d698239\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gwt8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-fhkjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.955737 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27d4d422-313b-48d2-b7ec-7e914beaac62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g6xzs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g6xzs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-57b78d8988-trlrx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.964790 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dsgwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-5jnd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.973459 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6255149e-f462-4c80-a8d7-fcdd2ce199cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://1f0821e94ddb2e6bf615e3accd0ec0c094ad7318840fb733498457386fa12672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://d756a0c45f137a1c35d97de642058fe0719246aa403da963da642b3575e4a7c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbb
f1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://5aca51a566a8bfc282a87f7f7c29ccaa92469aca9b326106d1e177dcb6a0b159\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://1674b7d60c4a07220d2988766c24f8b6c7835f7d1736041dc4cdf00f7a96e9e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1674b7d60c4a07220d2988766c24f8b6c7835f7d1736041dc4cdf00f7a96e9e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:20 crc kubenswrapper[5113]: I0130 00:11:20.991841 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7641c4c3-6ba9-47e9-9fd9-cf5a8e3705af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"},\\\"containerID\\\":\\\"cri-o://18179ab63cf80a7f758b7fd5824271423f89324275d958eb798af84b0b460a97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://895ff1a1765310a6aee57eb2705b412bd05dfe3974bd21a972125242e01f91a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\
"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://ab0ebe5822f8252d91adf6695fa0650c95e18f3a4a36ba72dfb2277b4ab1778c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://3802970e9c97e6b99feaf7ca3cfd1f6939675398a66b6034ad9d323eae1838ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:46Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://c84daac274d8f9e1ac29f34d4d1dbecebb0dea078366aa37a4cbbc588a678232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\
"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://f6f2151f8ca9d295331aa70c7cb364785d177a2cabe410797748bb8b3f2d294e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6f2151f8ca9d295331aa70c7cb364785d177a2cabe410797748bb8b3f2d294e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd-auto-backup\\\",\\\"name\\\":\\\"etcd-auto-backup-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://3c5720519c1757ed7c8bf0c9f56cb990b9f450f7c9c6bc1fd4961f8851f2cd14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c5720519c1757ed7c8bf0c9f56cb990b9f450f7c9c6bc1fd4961f8851f2cd14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://65e9cdf813a17a4871a7677b2b0d236147c4db43f9b87a38b60c0795d93a5207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65e9cdf813a17a4871a7677b2b0d236147c4db43f9b87a38b60c0795d93a5207\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:44Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.000976 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7599e0b6-bddf-4def-b7f2-0b32206e8651-serving-cert\") pod \"7599e0b6-bddf-4def-b7f2-0b32206e8651\" (UID: \"7599e0b6-bddf-4def-b7f2-0b32206e8651\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001038 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-images\") pod \"d565531a-ff86-4608-9d19-767de01ac31b\" (UID: \"d565531a-ff86-4608-9d19-767de01ac31b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001059 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94l9h\" (UniqueName: \"kubernetes.io/projected/16bdd140-dce1-464c-ab47-dd5798d1d256-kube-api-access-94l9h\") pod \"16bdd140-dce1-464c-ab47-dd5798d1d256\" (UID: \"16bdd140-dce1-464c-ab47-dd5798d1d256\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001080 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-config\") pod \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\" (UID: \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001099 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-oauth-config\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001118 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptkcf\" (UniqueName: \"kubernetes.io/projected/7599e0b6-bddf-4def-b7f2-0b32206e8651-kube-api-access-ptkcf\") pod \"7599e0b6-bddf-4def-b7f2-0b32206e8651\" (UID: \"7599e0b6-bddf-4def-b7f2-0b32206e8651\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001133 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d7e8f42f-dc0e-424b-bb56-5ec849834888-service-ca\") pod \"d7e8f42f-dc0e-424b-bb56-5ec849834888\" (UID: \"d7e8f42f-dc0e-424b-bb56-5ec849834888\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001151 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-client-ca\") pod \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\" (UID: \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\") " Jan 30 00:11:21 crc 
kubenswrapper[5113]: I0130 00:11:21.001173 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-operator-metrics\") pod \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\" (UID: \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001207 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7afa918d-be67-40a6-803c-d3b0ae99d815-kube-api-access\") pod \"7afa918d-be67-40a6-803c-d3b0ae99d815\" (UID: \"7afa918d-be67-40a6-803c-d3b0ae99d815\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001225 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/f65c0ac1-8bca-454d-a2e6-e35cb418beac-tmp-dir\") pod \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\" (UID: \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001244 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-images\") pod \"c491984c-7d4b-44aa-8c1e-d7974424fa47\" (UID: \"c491984c-7d4b-44aa-8c1e-d7974424fa47\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001265 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/6ee8fbd3-1f81-4666-96da-5afc70819f1a-samples-operator-tls\") pod \"6ee8fbd3-1f81-4666-96da-5afc70819f1a\" (UID: \"6ee8fbd3-1f81-4666-96da-5afc70819f1a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001292 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-tmp\") pod \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\" (UID: \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001311 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-tmpfs\") pod \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\" (UID: \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001326 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01080b46-74f1-4191-8755-5152a57b3b25-config\") pod \"01080b46-74f1-4191-8755-5152a57b3b25\" (UID: \"01080b46-74f1-4191-8755-5152a57b3b25\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001345 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-login\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001362 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ws8zz\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-kube-api-access-ws8zz\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:21 crc 
kubenswrapper[5113]: I0130 00:11:21.001380 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-serving-ca\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001396 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddlk9\" (UniqueName: \"kubernetes.io/projected/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-kube-api-access-ddlk9\") pod \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\" (UID: \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001413 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/92dfbade-90b6-4169-8c07-72cff7f2c82b-tmp-dir\") pod \"92dfbade-90b6-4169-8c07-72cff7f2c82b\" (UID: \"92dfbade-90b6-4169-8c07-72cff7f2c82b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001429 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-binary-copy\") pod \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\" (UID: \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001445 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-serving-cert\") pod \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\" (UID: \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001462 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-config\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001482 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-trusted-ca-bundle\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001502 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l87hs\" (UniqueName: \"kubernetes.io/projected/5ebfebf6-3ecd-458e-943f-bb25b52e2718-kube-api-access-l87hs\") pod \"5ebfebf6-3ecd-458e-943f-bb25b52e2718\" (UID: \"5ebfebf6-3ecd-458e-943f-bb25b52e2718\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001554 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18f80adb-c1c3-49ba-8ee4-932c851d3897-service-ca-bundle\") pod \"18f80adb-c1c3-49ba-8ee4-932c851d3897\" (UID: \"18f80adb-c1c3-49ba-8ee4-932c851d3897\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001572 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pgx6b\" (UniqueName: \"kubernetes.io/projected/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-kube-api-access-pgx6b\") pod \"f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4\" (UID: \"f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4\") " Jan 30 
00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001633 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-service-ca\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001653 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/7afa918d-be67-40a6-803c-d3b0ae99d815-tmp\") pod \"7afa918d-be67-40a6-803c-d3b0ae99d815\" (UID: \"7afa918d-be67-40a6-803c-d3b0ae99d815\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001694 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pddnv\" (UniqueName: \"kubernetes.io/projected/e093be35-bb62-4843-b2e8-094545761610-kube-api-access-pddnv\") pod \"e093be35-bb62-4843-b2e8-094545761610\" (UID: \"e093be35-bb62-4843-b2e8-094545761610\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001712 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/567683bd-0efc-4f21-b076-e28559628404-tmp-dir\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001731 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g4lr\" (UniqueName: \"kubernetes.io/projected/f7e2c886-118e-43bb-bef1-c78134de392b-kube-api-access-6g4lr\") pod \"f7e2c886-118e-43bb-bef1-c78134de392b\" (UID: \"f7e2c886-118e-43bb-bef1-c78134de392b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001753 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-catalog-content\") pod \"31fa8943-81cc-4750-a0b7-0fa9ab5af883\" (UID: \"31fa8943-81cc-4750-a0b7-0fa9ab5af883\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001775 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-service-ca-bundle\") pod \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\" (UID: \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001797 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-auth-proxy-config\") pod \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\" (UID: \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001818 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4g8ts\" (UniqueName: \"kubernetes.io/projected/92dfbade-90b6-4169-8c07-72cff7f2c82b-kube-api-access-4g8ts\") pod \"92dfbade-90b6-4169-8c07-72cff7f2c82b\" (UID: \"92dfbade-90b6-4169-8c07-72cff7f2c82b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001863 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7afa918d-be67-40a6-803c-d3b0ae99d815-serving-cert\") pod \"7afa918d-be67-40a6-803c-d3b0ae99d815\" (UID: 
\"7afa918d-be67-40a6-803c-d3b0ae99d815\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001913 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-config\") pod \"c491984c-7d4b-44aa-8c1e-d7974424fa47\" (UID: \"c491984c-7d4b-44aa-8c1e-d7974424fa47\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001929 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a555ff2e-0be6-46d5-897d-863bb92ae2b3-serving-cert\") pod \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\" (UID: \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001947 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01080b46-74f1-4191-8755-5152a57b3b25-serving-cert\") pod \"01080b46-74f1-4191-8755-5152a57b3b25\" (UID: \"01080b46-74f1-4191-8755-5152a57b3b25\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001964 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-service-ca\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.001980 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-cni-binary-copy\") pod \"81e39f7b-62e4-4fc9-992a-6535ce127a02\" (UID: \"81e39f7b-62e4-4fc9-992a-6535ce127a02\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002003 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4smf\" (UniqueName: \"kubernetes.io/projected/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-kube-api-access-q4smf\") pod \"0dd0fbac-8c0d-4228-8faa-abbeedabf7db\" (UID: \"0dd0fbac-8c0d-4228-8faa-abbeedabf7db\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002023 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qgrkj\" (UniqueName: \"kubernetes.io/projected/42a11a02-47e1-488f-b270-2679d3298b0e-kube-api-access-qgrkj\") pod \"42a11a02-47e1-488f-b270-2679d3298b0e\" (UID: \"42a11a02-47e1-488f-b270-2679d3298b0e\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002048 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/301e1965-1754-483d-b6cc-bfae7038bbca-tmpfs\") pod \"301e1965-1754-483d-b6cc-bfae7038bbca\" (UID: \"301e1965-1754-483d-b6cc-bfae7038bbca\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002069 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wj4qr\" (UniqueName: \"kubernetes.io/projected/149b3c48-e17c-4a66-a835-d86dabf6ff13-kube-api-access-wj4qr\") pod \"149b3c48-e17c-4a66-a835-d86dabf6ff13\" (UID: \"149b3c48-e17c-4a66-a835-d86dabf6ff13\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002087 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-srv-cert\") pod \"301e1965-1754-483d-b6cc-bfae7038bbca\" (UID: 
\"301e1965-1754-483d-b6cc-bfae7038bbca\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002103 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4hb7m\" (UniqueName: \"kubernetes.io/projected/94a6e063-3d1a-4d44-875d-185291448c31-kube-api-access-4hb7m\") pod \"94a6e063-3d1a-4d44-875d-185291448c31\" (UID: \"94a6e063-3d1a-4d44-875d-185291448c31\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002119 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-router-certs\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002136 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vsz9\" (UniqueName: \"kubernetes.io/projected/c491984c-7d4b-44aa-8c1e-d7974424fa47-kube-api-access-9vsz9\") pod \"c491984c-7d4b-44aa-8c1e-d7974424fa47\" (UID: \"c491984c-7d4b-44aa-8c1e-d7974424fa47\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002152 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-machine-approver-tls\") pod \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\" (UID: \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002168 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-ca\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002187 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-serving-cert\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002203 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5lgh\" (UniqueName: \"kubernetes.io/projected/d19cb085-0c5b-4810-b654-ce7923221d90-kube-api-access-m5lgh\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002219 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-script-lib\") pod \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\" (UID: \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002236 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-node-bootstrap-token\") pod \"593a3561-7760-45c5-8f91-5aaef7475d0f\" (UID: \"593a3561-7760-45c5-8f91-5aaef7475d0f\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002254 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/f7e2c886-118e-43bb-bef1-c78134de392b-tmp-dir\") 
pod \"f7e2c886-118e-43bb-bef1-c78134de392b\" (UID: \"f7e2c886-118e-43bb-bef1-c78134de392b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002271 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w94wk\" (UniqueName: \"kubernetes.io/projected/01080b46-74f1-4191-8755-5152a57b3b25-kube-api-access-w94wk\") pod \"01080b46-74f1-4191-8755-5152a57b3b25\" (UID: \"01080b46-74f1-4191-8755-5152a57b3b25\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002287 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-webhook-cert\") pod \"a7a88189-c967-4640-879e-27665747f20c\" (UID: \"a7a88189-c967-4640-879e-27665747f20c\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002307 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-client\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002330 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tknt7\" (UniqueName: \"kubernetes.io/projected/584e1f4a-8205-47d7-8efb-3afc6017c4c9-kube-api-access-tknt7\") pod \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\" (UID: \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002352 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-proxy-ca-bundles\") pod \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\" (UID: \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002370 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-trusted-ca\") pod \"2325ffef-9d5b-447f-b00e-3efc429acefe\" (UID: \"2325ffef-9d5b-447f-b00e-3efc429acefe\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002391 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-catalog-content\") pod \"b605f283-6f2e-42da-a838-54421690f7d0\" (UID: \"b605f283-6f2e-42da-a838-54421690f7d0\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002410 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a208c9c2-333b-4b4a-be0d-bc32ec38a821-package-server-manager-serving-cert\") pod \"a208c9c2-333b-4b4a-be0d-bc32ec38a821\" (UID: \"a208c9c2-333b-4b4a-be0d-bc32ec38a821\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002428 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7df94c10-441d-4386-93a6-6730fb7bcde0-ovn-control-plane-metrics-cert\") pod \"7df94c10-441d-4386-93a6-6730fb7bcde0\" (UID: \"7df94c10-441d-4386-93a6-6730fb7bcde0\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002447 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-webhook-certs\") pod \"0dd0fbac-8c0d-4228-8faa-abbeedabf7db\" (UID: \"0dd0fbac-8c0d-4228-8faa-abbeedabf7db\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002469 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4tqq\" (UniqueName: \"kubernetes.io/projected/6ee8fbd3-1f81-4666-96da-5afc70819f1a-kube-api-access-d4tqq\") pod \"6ee8fbd3-1f81-4666-96da-5afc70819f1a\" (UID: \"6ee8fbd3-1f81-4666-96da-5afc70819f1a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002489 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9e9b5059-1b3e-4067-a63d-2952cbe863af-ca-trust-extracted\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002506 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-trusted-ca\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002527 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted-pem\" (UniqueName: \"kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-ca-trust-extracted-pem\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002557 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-metrics-certs\") pod \"f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4\" (UID: \"f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002573 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9f71a554-e414-4bc3-96d2-674060397afe-metrics-tls\") pod \"9f71a554-e414-4bc3-96d2-674060397afe\" (UID: \"9f71a554-e414-4bc3-96d2-674060397afe\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002590 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-tmp\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002609 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftwb6\" (UniqueName: \"kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-kube-api-access-ftwb6\") pod \"9f71a554-e414-4bc3-96d2-674060397afe\" (UID: \"9f71a554-e414-4bc3-96d2-674060397afe\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002626 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzt4w\" (UniqueName: \"kubernetes.io/projected/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-kube-api-access-rzt4w\") pod \"a52afe44-fb37-46ed-a1f8-bf39727a3cbe\" (UID: \"a52afe44-fb37-46ed-a1f8-bf39727a3cbe\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002651 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmmzf\" 
(UniqueName: \"kubernetes.io/projected/7df94c10-441d-4386-93a6-6730fb7bcde0-kube-api-access-nmmzf\") pod \"7df94c10-441d-4386-93a6-6730fb7bcde0\" (UID: \"7df94c10-441d-4386-93a6-6730fb7bcde0\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002683 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxfcv\" (UniqueName: \"kubernetes.io/projected/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-kube-api-access-xxfcv\") pod \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\" (UID: \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002706 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/c5f2bfad-70f6-4185-a3d9-81ce12720767-tmp-dir\") pod \"c5f2bfad-70f6-4185-a3d9-81ce12720767\" (UID: \"c5f2bfad-70f6-4185-a3d9-81ce12720767\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003619 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/736c54fe-349c-4bb9-870a-d1c1d1c03831-serving-cert\") pod \"736c54fe-349c-4bb9-870a-d1c1d1c03831\" (UID: \"736c54fe-349c-4bb9-870a-d1c1d1c03831\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003668 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-mcd-auth-proxy-config\") pod \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\" (UID: \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003689 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09cfa50b-4138-4585-a53e-64dd3ab73335-config\") pod \"09cfa50b-4138-4585-a53e-64dd3ab73335\" (UID: \"09cfa50b-4138-4585-a53e-64dd3ab73335\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003719 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-cabundle\") pod \"ce090a97-9ab6-4c40-a719-64ff2acd9778\" (UID: \"ce090a97-9ab6-4c40-a719-64ff2acd9778\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003763 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm9x7\" (UniqueName: \"kubernetes.io/projected/f559dfa3-3917-43a2-97f6-61ddfda10e93-kube-api-access-hm9x7\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003788 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-utilities\") pod \"b605f283-6f2e-42da-a838-54421690f7d0\" (UID: \"b605f283-6f2e-42da-a838-54421690f7d0\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003809 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-bound-sa-token\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003842 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-provider-selection\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003860 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5f2bfad-70f6-4185-a3d9-81ce12720767-config\") pod \"c5f2bfad-70f6-4185-a3d9-81ce12720767\" (UID: \"c5f2bfad-70f6-4185-a3d9-81ce12720767\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003878 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-ovnkube-config\") pod \"7df94c10-441d-4386-93a6-6730fb7bcde0\" (UID: \"7df94c10-441d-4386-93a6-6730fb7bcde0\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003895 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a7a88189-c967-4640-879e-27665747f20c-tmpfs\") pod \"a7a88189-c967-4640-879e-27665747f20c\" (UID: \"a7a88189-c967-4640-879e-27665747f20c\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003916 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-bound-sa-token\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003937 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"whereabouts-flatfile-configmap\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-whereabouts-flatfile-configmap\") pod \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\" (UID: \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003956 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-service-ca\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003972 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-audit-policies\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003991 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-ocp-branding-template\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004009 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-auth-proxy-config\") pod \"d565531a-ff86-4608-9d19-767de01ac31b\" (UID: \"d565531a-ff86-4608-9d19-767de01ac31b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 
00:11:21.004027 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rmnv\" (UniqueName: \"kubernetes.io/projected/b605f283-6f2e-42da-a838-54421690f7d0-kube-api-access-6rmnv\") pod \"b605f283-6f2e-42da-a838-54421690f7d0\" (UID: \"b605f283-6f2e-42da-a838-54421690f7d0\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004045 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7599e0b6-bddf-4def-b7f2-0b32206e8651-config\") pod \"7599e0b6-bddf-4def-b7f2-0b32206e8651\" (UID: \"7599e0b6-bddf-4def-b7f2-0b32206e8651\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004063 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-certs\") pod \"593a3561-7760-45c5-8f91-5aaef7475d0f\" (UID: \"593a3561-7760-45c5-8f91-5aaef7475d0f\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004081 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zg8nc\" (UniqueName: \"kubernetes.io/projected/2325ffef-9d5b-447f-b00e-3efc429acefe-kube-api-access-zg8nc\") pod \"2325ffef-9d5b-447f-b00e-3efc429acefe\" (UID: \"2325ffef-9d5b-447f-b00e-3efc429acefe\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004103 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-env-overrides\") pod \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\" (UID: \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004129 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovn-node-metrics-cert\") pod \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\" (UID: \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004222 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004249 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-tls\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004274 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m26jq\" (UniqueName: \"kubernetes.io/projected/567683bd-0efc-4f21-b076-e28559628404-kube-api-access-m26jq\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004299 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grwfz\" (UniqueName: \"kubernetes.io/projected/31fa8943-81cc-4750-a0b7-0fa9ab5af883-kube-api-access-grwfz\") pod \"31fa8943-81cc-4750-a0b7-0fa9ab5af883\" (UID: 
\"31fa8943-81cc-4750-a0b7-0fa9ab5af883\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004324 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d7e8f42f-dc0e-424b-bb56-5ec849834888-kube-api-access\") pod \"d7e8f42f-dc0e-424b-bb56-5ec849834888\" (UID: \"d7e8f42f-dc0e-424b-bb56-5ec849834888\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002372 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" (UID: "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.002782 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7599e0b6-bddf-4def-b7f2-0b32206e8651-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7599e0b6-bddf-4def-b7f2-0b32206e8651" (UID: "7599e0b6-bddf-4def-b7f2-0b32206e8651"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003799 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ee8fbd3-1f81-4666-96da-5afc70819f1a-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "6ee8fbd3-1f81-4666-96da-5afc70819f1a" (UID: "6ee8fbd3-1f81-4666-96da-5afc70819f1a"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.003986 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/567683bd-0efc-4f21-b076-e28559628404-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "tmp-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004010 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-client-ca" (OuterVolumeSpecName: "client-ca") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004092 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "2325ffef-9d5b-447f-b00e-3efc429acefe" (UID: "2325ffef-9d5b-447f-b00e-3efc429acefe"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004562 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-config" (OuterVolumeSpecName: "config") pod "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" (UID: "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004657 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5f2bfad-70f6-4185-a3d9-81ce12720767-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c5f2bfad-70f6-4185-a3d9-81ce12720767" (UID: "c5f2bfad-70f6-4185-a3d9-81ce12720767"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004319 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b605f283-6f2e-42da-a838-54421690f7d0" (UID: "b605f283-6f2e-42da-a838-54421690f7d0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004358 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7afa918d-be67-40a6-803c-d3b0ae99d815-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7afa918d-be67-40a6-803c-d3b0ae99d815" (UID: "7afa918d-be67-40a6-803c-d3b0ae99d815"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004779 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7e2c886-118e-43bb-bef1-c78134de392b-kube-api-access-6g4lr" (OuterVolumeSpecName: "kube-api-access-6g4lr") pod "f7e2c886-118e-43bb-bef1-c78134de392b" (UID: "f7e2c886-118e-43bb-bef1-c78134de392b"). InnerVolumeSpecName "kube-api-access-6g4lr". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004859 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7599e0b6-bddf-4def-b7f2-0b32206e8651-kube-api-access-ptkcf" (OuterVolumeSpecName: "kube-api-access-ptkcf") pod "7599e0b6-bddf-4def-b7f2-0b32206e8651" (UID: "7599e0b6-bddf-4def-b7f2-0b32206e8651"). InnerVolumeSpecName "kube-api-access-ptkcf". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004857 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "301e1965-1754-483d-b6cc-bfae7038bbca" (UID: "301e1965-1754-483d-b6cc-bfae7038bbca"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.005120 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/736c54fe-349c-4bb9-870a-d1c1d1c03831-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "736c54fe-349c-4bb9-870a-d1c1d1c03831" (UID: "736c54fe-349c-4bb9-870a-d1c1d1c03831"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.005147 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16bdd140-dce1-464c-ab47-dd5798d1d256-kube-api-access-94l9h" (OuterVolumeSpecName: "kube-api-access-94l9h") pod "16bdd140-dce1-464c-ab47-dd5798d1d256" (UID: "16bdd140-dce1-464c-ab47-dd5798d1d256"). 
InnerVolumeSpecName "kube-api-access-94l9h". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.005159 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5f2bfad-70f6-4185-a3d9-81ce12720767-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "c5f2bfad-70f6-4185-a3d9-81ce12720767" (UID: "c5f2bfad-70f6-4185-a3d9-81ce12720767"). InnerVolumeSpecName "tmp-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.004352 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c5f2bfad-70f6-4185-a3d9-81ce12720767-kube-api-access\") pod \"c5f2bfad-70f6-4185-a3d9-81ce12720767\" (UID: \"c5f2bfad-70f6-4185-a3d9-81ce12720767\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.005274 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-images" (OuterVolumeSpecName: "images") pod "d565531a-ff86-4608-9d19-767de01ac31b" (UID: "d565531a-ff86-4608-9d19-767de01ac31b"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.005285 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f65c0ac1-8bca-454d-a2e6-e35cb418beac-kube-api-access\") pod \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\" (UID: \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.005335 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f65c0ac1-8bca-454d-a2e6-e35cb418beac-serving-cert\") pod \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\" (UID: \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.005639 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" (UID: "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.005922 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e093be35-bb62-4843-b2e8-094545761610-kube-api-access-pddnv" (OuterVolumeSpecName: "kube-api-access-pddnv") pod "e093be35-bb62-4843-b2e8-094545761610" (UID: "e093be35-bb62-4843-b2e8-094545761610"). InnerVolumeSpecName "kube-api-access-pddnv". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.005991 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-tmp" (OuterVolumeSpecName: "tmp") pod "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" (UID: "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a"). InnerVolumeSpecName "tmp". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006085 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01080b46-74f1-4191-8755-5152a57b3b25-config" (OuterVolumeSpecName: "config") pod "01080b46-74f1-4191-8755-5152a57b3b25" (UID: "01080b46-74f1-4191-8755-5152a57b3b25"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006141 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/92dfbade-90b6-4169-8c07-72cff7f2c82b-metrics-tls\") pod \"92dfbade-90b6-4169-8c07-72cff7f2c82b\" (UID: \"92dfbade-90b6-4169-8c07-72cff7f2c82b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006181 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b4750666-1362-4001-abd0-6f89964cc621-mcc-auth-proxy-config\") pod \"b4750666-1362-4001-abd0-6f89964cc621\" (UID: \"b4750666-1362-4001-abd0-6f89964cc621\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006210 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-catalog-content\") pod \"94a6e063-3d1a-4d44-875d-185291448c31\" (UID: \"94a6e063-3d1a-4d44-875d-185291448c31\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006235 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-trusted-ca\") pod \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\" (UID: \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006261 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-env-overrides\") pod \"7df94c10-441d-4386-93a6-6730fb7bcde0\" (UID: \"7df94c10-441d-4386-93a6-6730fb7bcde0\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006286 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-idp-0-file-data\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006313 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zsb9b\" (UniqueName: \"kubernetes.io/projected/09cfa50b-4138-4585-a53e-64dd3ab73335-kube-api-access-zsb9b\") pod \"09cfa50b-4138-4585-a53e-64dd3ab73335\" (UID: \"09cfa50b-4138-4585-a53e-64dd3ab73335\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006336 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-cert\") pod \"a52afe44-fb37-46ed-a1f8-bf39727a3cbe\" (UID: \"a52afe44-fb37-46ed-a1f8-bf39727a3cbe\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006360 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-utilities\") pod \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\" (UID: \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006485 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pllx6\" (UniqueName: \"kubernetes.io/projected/81e39f7b-62e4-4fc9-992a-6535ce127a02-kube-api-access-pllx6\") pod \"81e39f7b-62e4-4fc9-992a-6535ce127a02\" (UID: \"81e39f7b-62e4-4fc9-992a-6535ce127a02\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006514 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-serving-cert\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006353 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-images" (OuterVolumeSpecName: "images") pod "c491984c-7d4b-44aa-8c1e-d7974424fa47" (UID: "c491984c-7d4b-44aa-8c1e-d7974424fa47"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006557 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zth6t\" (UniqueName: \"kubernetes.io/projected/6077b63e-53a2-4f96-9d56-1ce0324e4913-kube-api-access-zth6t\") pod \"6077b63e-53a2-4f96-9d56-1ce0324e4913\" (UID: \"6077b63e-53a2-4f96-9d56-1ce0324e4913\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006584 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-serving-cert\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006609 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-trusted-ca-bundle\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006658 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/5ebfebf6-3ecd-458e-943f-bb25b52e2718-serviceca\") pod \"5ebfebf6-3ecd-458e-943f-bb25b52e2718\" (UID: \"5ebfebf6-3ecd-458e-943f-bb25b52e2718\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006688 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-utilities\") pod \"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\" (UID: \"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006712 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/92dfbade-90b6-4169-8c07-72cff7f2c82b-config-volume\") pod \"92dfbade-90b6-4169-8c07-72cff7f2c82b\" (UID: \"92dfbade-90b6-4169-8c07-72cff7f2c82b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006738 5113 reconciler_common.go:162] 
"operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6077b63e-53a2-4f96-9d56-1ce0324e4913-metrics-tls\") pod \"6077b63e-53a2-4f96-9d56-1ce0324e4913\" (UID: \"6077b63e-53a2-4f96-9d56-1ce0324e4913\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006763 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-tmp\") pod \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\" (UID: \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006793 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sbc2l\" (UniqueName: \"kubernetes.io/projected/593a3561-7760-45c5-8f91-5aaef7475d0f-kube-api-access-sbc2l\") pod \"593a3561-7760-45c5-8f91-5aaef7475d0f\" (UID: \"593a3561-7760-45c5-8f91-5aaef7475d0f\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006817 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16bdd140-dce1-464c-ab47-dd5798d1d256-serving-cert\") pod \"16bdd140-dce1-464c-ab47-dd5798d1d256\" (UID: \"16bdd140-dce1-464c-ab47-dd5798d1d256\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006840 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-stats-auth\") pod \"18f80adb-c1c3-49ba-8ee4-932c851d3897\" (UID: \"18f80adb-c1c3-49ba-8ee4-932c851d3897\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006869 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-catalog-content\") pod \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\" (UID: \"584e1f4a-8205-47d7-8efb-3afc6017c4c9\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006895 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/736c54fe-349c-4bb9-870a-d1c1d1c03831-tmp\") pod \"736c54fe-349c-4bb9-870a-d1c1d1c03831\" (UID: \"736c54fe-349c-4bb9-870a-d1c1d1c03831\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006905 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "a7a88189-c967-4640-879e-27665747f20c" (UID: "a7a88189-c967-4640-879e-27665747f20c"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006918 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-session\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006736 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f65c0ac1-8bca-454d-a2e6-e35cb418beac-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "f65c0ac1-8bca-454d-a2e6-e35cb418beac" (UID: "f65c0ac1-8bca-454d-a2e6-e35cb418beac"). InnerVolumeSpecName "tmp-dir". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006961 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/6077b63e-53a2-4f96-9d56-1ce0324e4913-tmp-dir\") pod \"6077b63e-53a2-4f96-9d56-1ce0324e4913\" (UID: \"6077b63e-53a2-4f96-9d56-1ce0324e4913\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006995 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2325ffef-9d5b-447f-b00e-3efc429acefe-serving-cert\") pod \"2325ffef-9d5b-447f-b00e-3efc429acefe\" (UID: \"2325ffef-9d5b-447f-b00e-3efc429acefe\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007022 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-profile-collector-cert\") pod \"301e1965-1754-483d-b6cc-bfae7038bbca\" (UID: \"301e1965-1754-483d-b6cc-bfae7038bbca\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007024 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/584e1f4a-8205-47d7-8efb-3afc6017c4c9-kube-api-access-tknt7" (OuterVolumeSpecName: "kube-api-access-tknt7") pod "584e1f4a-8205-47d7-8efb-3afc6017c4c9" (UID: "584e1f4a-8205-47d7-8efb-3afc6017c4c9"). InnerVolumeSpecName "kube-api-access-tknt7". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007049 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-config\") pod \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\" (UID: \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.006977 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4" (UID: "f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007078 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/42a11a02-47e1-488f-b270-2679d3298b0e-control-plane-machine-set-operator-tls\") pod \"42a11a02-47e1-488f-b270-2679d3298b0e\" (UID: \"42a11a02-47e1-488f-b270-2679d3298b0e\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007105 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dztfv\" (UniqueName: \"kubernetes.io/projected/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-kube-api-access-dztfv\") pod \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\" (UID: \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007131 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ks6v2\" (UniqueName: \"kubernetes.io/projected/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-kube-api-access-ks6v2\") pod \"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\" (UID: \"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007157 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-config\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007183 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7e8f42f-dc0e-424b-bb56-5ec849834888-serving-cert\") pod \"d7e8f42f-dc0e-424b-bb56-5ec849834888\" (UID: \"d7e8f42f-dc0e-424b-bb56-5ec849834888\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007217 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-serving-cert\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007233 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007247 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jjkz\" (UniqueName: \"kubernetes.io/projected/301e1965-1754-483d-b6cc-bfae7038bbca-kube-api-access-7jjkz\") pod \"301e1965-1754-483d-b6cc-bfae7038bbca\" (UID: \"301e1965-1754-483d-b6cc-bfae7038bbca\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007290 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f65c0ac1-8bca-454d-a2e6-e35cb418beac-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f65c0ac1-8bca-454d-a2e6-e35cb418beac" (UID: "f65c0ac1-8bca-454d-a2e6-e35cb418beac"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007436 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f71a554-e414-4bc3-96d2-674060397afe-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "9f71a554-e414-4bc3-96d2-674060397afe" (UID: "9f71a554-e414-4bc3-96d2-674060397afe"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007379 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7df94c10-441d-4386-93a6-6730fb7bcde0-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "7df94c10-441d-4386-93a6-6730fb7bcde0" (UID: "7df94c10-441d-4386-93a6-6730fb7bcde0"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007499 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7e8f42f-dc0e-424b-bb56-5ec849834888-service-ca" (OuterVolumeSpecName: "service-ca") pod "d7e8f42f-dc0e-424b-bb56-5ec849834888" (UID: "d7e8f42f-dc0e-424b-bb56-5ec849834888"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007510 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c491984c-7d4b-44aa-8c1e-d7974424fa47-kube-api-access-9vsz9" (OuterVolumeSpecName: "kube-api-access-9vsz9") pod "c491984c-7d4b-44aa-8c1e-d7974424fa47" (UID: "c491984c-7d4b-44aa-8c1e-d7974424fa47"). InnerVolumeSpecName "kube-api-access-9vsz9". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007655 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/301e1965-1754-483d-b6cc-bfae7038bbca-kube-api-access-7jjkz" (OuterVolumeSpecName: "kube-api-access-7jjkz") pod "301e1965-1754-483d-b6cc-bfae7038bbca" (UID: "301e1965-1754-483d-b6cc-bfae7038bbca"). InnerVolumeSpecName "kube-api-access-7jjkz". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007755 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "af33e427-6803-48c2-a76a-dd9deb7cbf9a" (UID: "af33e427-6803-48c2-a76a-dd9deb7cbf9a"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008051 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "e1d2a42d-af1d-4054-9618-ab545e0ed8b7" (UID: "e1d2a42d-af1d-4054-9618-ab545e0ed8b7"). InnerVolumeSpecName "mcd-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008080 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f65c0ac1-8bca-454d-a2e6-e35cb418beac-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "f65c0ac1-8bca-454d-a2e6-e35cb418beac" (UID: "f65c0ac1-8bca-454d-a2e6-e35cb418beac"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008215 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008233 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94a6e063-3d1a-4d44-875d-185291448c31-kube-api-access-4hb7m" (OuterVolumeSpecName: "kube-api-access-4hb7m") pod "94a6e063-3d1a-4d44-875d-185291448c31" (UID: "94a6e063-3d1a-4d44-875d-185291448c31"). InnerVolumeSpecName "kube-api-access-4hb7m". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008290 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-tmp" (OuterVolumeSpecName: "tmp") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.007288 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-config\") pod \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\" (UID: \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008376 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7afa918d-be67-40a6-803c-d3b0ae99d815-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7afa918d-be67-40a6-803c-d3b0ae99d815" (UID: "7afa918d-be67-40a6-803c-d3b0ae99d815"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008378 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nspp\" (UniqueName: \"kubernetes.io/projected/a7a88189-c967-4640-879e-27665747f20c-kube-api-access-8nspp\") pod \"a7a88189-c967-4640-879e-27665747f20c\" (UID: \"a7a88189-c967-4640-879e-27665747f20c\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008413 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" (UID: "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008434 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqbfk\" (UniqueName: \"kubernetes.io/projected/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-kube-api-access-qqbfk\") pod \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\" (UID: \"b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008471 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-bound-sa-token\") pod \"9f71a554-e414-4bc3-96d2-674060397afe\" (UID: \"9f71a554-e414-4bc3-96d2-674060397afe\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008552 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a208c9c2-333b-4b4a-be0d-bc32ec38a821-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "a208c9c2-333b-4b4a-be0d-bc32ec38a821" (UID: "a208c9c2-333b-4b4a-be0d-bc32ec38a821"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008598 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-catalog-content\") pod \"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\" (UID: \"71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008642 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7afa918d-be67-40a6-803c-d3b0ae99d815-config\") pod \"7afa918d-be67-40a6-803c-d3b0ae99d815\" (UID: \"7afa918d-be67-40a6-803c-d3b0ae99d815\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008671 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-client-ca\") pod \"736c54fe-349c-4bb9-870a-d1c1d1c03831\" (UID: \"736c54fe-349c-4bb9-870a-d1c1d1c03831\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008700 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-catalog-content\") pod \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\" (UID: \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008730 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-audit-policies\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008759 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-config\") pod \"736c54fe-349c-4bb9-870a-d1c1d1c03831\" (UID: \"736c54fe-349c-4bb9-870a-d1c1d1c03831\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008828 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008844 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d565531a-ff86-4608-9d19-767de01ac31b-proxy-tls\") pod \"d565531a-ff86-4608-9d19-767de01ac31b\" (UID: \"d565531a-ff86-4608-9d19-767de01ac31b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008906 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-profile-collector-cert\") pod \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\" (UID: \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009048 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-utilities\") pod \"149b3c48-e17c-4a66-a835-d86dabf6ff13\" (UID: \"149b3c48-e17c-4a66-a835-d86dabf6ff13\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009083 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-config\") pod \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\" (UID: \"d45be74c-0d98-4d18-90e4-f7ef1b6daaf7\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009115 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-catalog-content\") pod \"149b3c48-e17c-4a66-a835-d86dabf6ff13\" (UID: \"149b3c48-e17c-4a66-a835-d86dabf6ff13\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009142 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-config\") pod \"2325ffef-9d5b-447f-b00e-3efc429acefe\" (UID: \"2325ffef-9d5b-447f-b00e-3efc429acefe\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009169 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9stx\" (UniqueName: \"kubernetes.io/projected/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-kube-api-access-l9stx\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009198 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9z4sw\" (UniqueName: \"kubernetes.io/projected/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-kube-api-access-9z4sw\") pod \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\" (UID: \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009228 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c491984c-7d4b-44aa-8c1e-d7974424fa47-machine-api-operator-tls\") pod \"c491984c-7d4b-44aa-8c1e-d7974424fa47\" (UID: \"c491984c-7d4b-44aa-8c1e-d7974424fa47\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 
00:11:21.009254 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-multus-daemon-config\") pod \"81e39f7b-62e4-4fc9-992a-6535ce127a02\" (UID: \"81e39f7b-62e4-4fc9-992a-6535ce127a02\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009280 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-encryption-config\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009308 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wbmqg\" (UniqueName: \"kubernetes.io/projected/18f80adb-c1c3-49ba-8ee4-932c851d3897-kube-api-access-wbmqg\") pod \"18f80adb-c1c3-49ba-8ee4-932c851d3897\" (UID: \"18f80adb-c1c3-49ba-8ee4-932c851d3897\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009335 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-metrics-certs\") pod \"18f80adb-c1c3-49ba-8ee4-932c851d3897\" (UID: \"18f80adb-c1c3-49ba-8ee4-932c851d3897\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009359 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9e9b5059-1b3e-4067-a63d-2952cbe863af-installation-pull-secrets\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009390 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5lcfw\" (UniqueName: \"kubernetes.io/projected/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-kube-api-access-5lcfw\") pod \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\" (UID: \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009417 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-encryption-config\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009444 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-trusted-ca-bundle\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009474 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkdh6\" (UniqueName: \"kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-kube-api-access-tkdh6\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009521 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-apiservice-cert\") pod \"a7a88189-c967-4640-879e-27665747f20c\" (UID: 
\"a7a88189-c967-4640-879e-27665747f20c\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009575 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-error\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009604 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-proxy-tls\") pod \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\" (UID: \"e1d2a42d-af1d-4054-9618-ab545e0ed8b7\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009633 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-image-import-ca\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009660 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-oauth-serving-cert\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009690 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnxbn\" (UniqueName: \"kubernetes.io/projected/ce090a97-9ab6-4c40-a719-64ff2acd9778-kube-api-access-xnxbn\") pod \"ce090a97-9ab6-4c40-a719-64ff2acd9778\" (UID: \"ce090a97-9ab6-4c40-a719-64ff2acd9778\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009719 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfp5s\" (UniqueName: \"kubernetes.io/projected/cc85e424-18b2-4924-920b-bd291a8c4b01-kube-api-access-xfp5s\") pod \"cc85e424-18b2-4924-920b-bd291a8c4b01\" (UID: \"cc85e424-18b2-4924-920b-bd291a8c4b01\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009744 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-serving-cert\") pod \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\" (UID: \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009806 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twvbl\" (UniqueName: \"kubernetes.io/projected/b4750666-1362-4001-abd0-6f89964cc621-kube-api-access-twvbl\") pod \"b4750666-1362-4001-abd0-6f89964cc621\" (UID: \"b4750666-1362-4001-abd0-6f89964cc621\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009874 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c5f2bfad-70f6-4185-a3d9-81ce12720767-serving-cert\") pod \"c5f2bfad-70f6-4185-a3d9-81ce12720767\" (UID: \"c5f2bfad-70f6-4185-a3d9-81ce12720767\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009902 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-config\") pod 
\"a555ff2e-0be6-46d5-897d-863bb92ae2b3\" (UID: \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009926 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-sysctl-allowlist\") pod \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\" (UID: \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009956 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-catalog-content\") pod \"cc85e424-18b2-4924-920b-bd291a8c4b01\" (UID: \"cc85e424-18b2-4924-920b-bd291a8c4b01\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009985 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mjwtd\" (UniqueName: \"kubernetes.io/projected/869851b9-7ffb-4af0-b166-1d8aa40a5f80-kube-api-access-mjwtd\") pod \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\" (UID: \"869851b9-7ffb-4af0-b166-1d8aa40a5f80\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010018 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-serving-ca\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010045 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-trusted-ca-bundle\") pod \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\" (UID: \"dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010072 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-99zj9\" (UniqueName: \"kubernetes.io/projected/d565531a-ff86-4608-9d19-767de01ac31b-kube-api-access-99zj9\") pod \"d565531a-ff86-4608-9d19-767de01ac31b\" (UID: \"d565531a-ff86-4608-9d19-767de01ac31b\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010098 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-trusted-ca-bundle\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010133 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-certificates\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010163 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-audit\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010187 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: 
\"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-client\") pod \"f559dfa3-3917-43a2-97f6-61ddfda10e93\" (UID: \"f559dfa3-3917-43a2-97f6-61ddfda10e93\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010217 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f71a554-e414-4bc3-96d2-674060397afe-trusted-ca\") pod \"9f71a554-e414-4bc3-96d2-674060397afe\" (UID: \"9f71a554-e414-4bc3-96d2-674060397afe\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010252 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nb9c\" (UniqueName: \"kubernetes.io/projected/6edfcf45-925b-4eff-b940-95b6fc0b85d4-kube-api-access-8nb9c\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010304 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df6f4867-b098-485b-81b7-844ef832d471\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"},\\\"containerID\\\":\\\"cri-o://f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-bundle-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\
\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T00:10:51Z\\\",\\\"message\\\":\\\" 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 00:10:51.157125 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 00:10:51.157152 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:10:51.157158 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:10:51.157164 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 00:10:51.157168 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 00:10:51.157171 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 00:10:51.157175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0130 00:10:51.157255 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0130 00:10:51.160382 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController\\\\nI0130 00:10:51.160422 1 shared_informer.go:350] \\\\\\\"Waiting for caches to sync\\\\\\\" controller=\\\\\\\"RequestHeaderAuthRequestController\\\\\\\"\\\\nI0130 00:10:51.160477 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI0130 00:10:51.160476 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0130 00:10:51.160492 1 shared_informer.go:350] \\\\\\\"Waiting for caches to sync\\\\\\\" controller=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI0130 00:10:51.160597 1 shared_informer.go:350] \\\\\\\"Waiting for caches to sync\\\\\\\" controller=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nF0130 00:10:51.160637 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T00:10:50Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:44Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008854 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-kube-api-access-ftwb6" (OuterVolumeSpecName: "kube-api-access-ftwb6") pod "9f71a554-e414-4bc3-96d2-674060397afe" (UID: "9f71a554-e414-4bc3-96d2-674060397afe"). InnerVolumeSpecName "kube-api-access-ftwb6". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008851 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010885 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-utilities" (OuterVolumeSpecName: "utilities") pod "584e1f4a-8205-47d7-8efb-3afc6017c4c9" (UID: "584e1f4a-8205-47d7-8efb-3afc6017c4c9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.008877 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09cfa50b-4138-4585-a53e-64dd3ab73335-config" (OuterVolumeSpecName: "config") pod "09cfa50b-4138-4585-a53e-64dd3ab73335" (UID: "09cfa50b-4138-4585-a53e-64dd3ab73335"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009028 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009123 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "0dd0fbac-8c0d-4228-8faa-abbeedabf7db" (UID: "0dd0fbac-8c0d-4228-8faa-abbeedabf7db"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009242 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "af33e427-6803-48c2-a76a-dd9deb7cbf9a" (UID: "af33e427-6803-48c2-a76a-dd9deb7cbf9a"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009391 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a555ff2e-0be6-46d5-897d-863bb92ae2b3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "serving-cert". 
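Editor's note on the status_manager entry above: the kubelet's status patch for kube-apiserver-crc is rejected because the "pod.network-node-identity.openshift.io" mutating webhook at https://127.0.0.1:9743 refuses connections while the node is tearing down. A rough Python sketch for pulling every webhook-call failure (and its target) out of a log stream follows; the pattern assumes the escaped-quote form seen in this log.

import re

# Matches: failed calling webhook \"pod.network-node-identity.openshift.io\"
WEBHOOK_ERR = re.compile(r'failed calling webhook \\?"([^"\\]+)')

def webhook_failures(lines):
    """Yield the name of the webhook blamed in each failure line."""
    for line in lines:
        m = WEBHOOK_ERR.search(line)
        if m:
            yield m.group(1)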
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009371 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d19cb085-0c5b-4810-b654-ce7923221d90-kube-api-access-m5lgh" (OuterVolumeSpecName: "kube-api-access-m5lgh") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "kube-api-access-m5lgh". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009554 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-kube-api-access-ddlk9" (OuterVolumeSpecName: "kube-api-access-ddlk9") pod "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" (UID: "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a"). InnerVolumeSpecName "kube-api-access-ddlk9". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009620 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7df94c10-441d-4386-93a6-6730fb7bcde0-kube-api-access-nmmzf" (OuterVolumeSpecName: "kube-api-access-nmmzf") pod "7df94c10-441d-4386-93a6-6730fb7bcde0" (UID: "7df94c10-441d-4386-93a6-6730fb7bcde0"). InnerVolumeSpecName "kube-api-access-nmmzf". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009728 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-config" (OuterVolumeSpecName: "config") pod "c491984c-7d4b-44aa-8c1e-d7974424fa47" (UID: "c491984c-7d4b-44aa-8c1e-d7974424fa47"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009749 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6077b63e-53a2-4f96-9d56-1ce0324e4913-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "6077b63e-53a2-4f96-9d56-1ce0324e4913" (UID: "6077b63e-53a2-4f96-9d56-1ce0324e4913"). InnerVolumeSpecName "tmp-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009867 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-kube-api-access-qqbfk" (OuterVolumeSpecName: "kube-api-access-qqbfk") pod "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" (UID: "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a"). InnerVolumeSpecName "kube-api-access-qqbfk". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.009864 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-ca-trust-extracted-pem" (OuterVolumeSpecName: "ca-trust-extracted-pem") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). InnerVolumeSpecName "ca-trust-extracted-pem". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010002 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). 
InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010033 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-kube-api-access-xxfcv" (OuterVolumeSpecName: "kube-api-access-xxfcv") pod "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" (UID: "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff"). InnerVolumeSpecName "kube-api-access-xxfcv". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010240 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2325ffef-9d5b-447f-b00e-3efc429acefe-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2325ffef-9d5b-447f-b00e-3efc429acefe" (UID: "2325ffef-9d5b-447f-b00e-3efc429acefe"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.011113 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.011131 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" (UID: "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.011246 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/593a3561-7760-45c5-8f91-5aaef7475d0f-kube-api-access-sbc2l" (OuterVolumeSpecName: "kube-api-access-sbc2l") pod "593a3561-7760-45c5-8f91-5aaef7475d0f" (UID: "593a3561-7760-45c5-8f91-5aaef7475d0f"). InnerVolumeSpecName "kube-api-access-sbc2l". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.011551 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-kube-api-access-ws8zz" (OuterVolumeSpecName: "kube-api-access-ws8zz") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "kube-api-access-ws8zz". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.011552 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81e39f7b-62e4-4fc9-992a-6535ce127a02-kube-api-access-pllx6" (OuterVolumeSpecName: "kube-api-access-pllx6") pod "81e39f7b-62e4-4fc9-992a-6535ce127a02" (UID: "81e39f7b-62e4-4fc9-992a-6535ce127a02"). InnerVolumeSpecName "kube-api-access-pllx6". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010266 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01080b46-74f1-4191-8755-5152a57b3b25-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01080b46-74f1-4191-8755-5152a57b3b25" (UID: "01080b46-74f1-4191-8755-5152a57b3b25"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010271 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "ce090a97-9ab6-4c40-a719-64ff2acd9778" (UID: "ce090a97-9ab6-4c40-a719-64ff2acd9778"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010471 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010533 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010696 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7a88189-c967-4640-879e-27665747f20c-kube-api-access-8nspp" (OuterVolumeSpecName: "kube-api-access-8nspp") pod "a7a88189-c967-4640-879e-27665747f20c" (UID: "a7a88189-c967-4640-879e-27665747f20c"). InnerVolumeSpecName "kube-api-access-8nspp". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010718 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d565531a-ff86-4608-9d19-767de01ac31b-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "d565531a-ff86-4608-9d19-767de01ac31b" (UID: "d565531a-ff86-4608-9d19-767de01ac31b"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010772 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010952 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "593a3561-7760-45c5-8f91-5aaef7475d0f" (UID: "593a3561-7760-45c5-8f91-5aaef7475d0f"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010982 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16bdd140-dce1-464c-ab47-dd5798d1d256-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "16bdd140-dce1-464c-ab47-dd5798d1d256" (UID: "16bdd140-dce1-464c-ab47-dd5798d1d256"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.011129 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f559dfa3-3917-43a2-97f6-61ddfda10e93-kube-api-access-hm9x7" (OuterVolumeSpecName: "kube-api-access-hm9x7") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "kube-api-access-hm9x7". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.011664 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.012008 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" (UID: "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.012020 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.012025 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42a11a02-47e1-488f-b270-2679d3298b0e-kube-api-access-qgrkj" (OuterVolumeSpecName: "kube-api-access-qgrkj") pod "42a11a02-47e1-488f-b270-2679d3298b0e" (UID: "42a11a02-47e1-488f-b270-2679d3298b0e"). InnerVolumeSpecName "kube-api-access-qgrkj". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.012151 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "869851b9-7ffb-4af0-b166-1d8aa40a5f80" (UID: "869851b9-7ffb-4af0-b166-1d8aa40a5f80"). 
InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.012194 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-client-ca" (OuterVolumeSpecName: "client-ca") pod "736c54fe-349c-4bb9-870a-d1c1d1c03831" (UID: "736c54fe-349c-4bb9-870a-d1c1d1c03831"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.012413 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7e2c886-118e-43bb-bef1-c78134de392b-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "f7e2c886-118e-43bb-bef1-c78134de392b" (UID: "f7e2c886-118e-43bb-bef1-c78134de392b"). InnerVolumeSpecName "tmp-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.012421 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6077b63e-53a2-4f96-9d56-1ce0324e4913-kube-api-access-zth6t" (OuterVolumeSpecName: "kube-api-access-zth6t") pod "6077b63e-53a2-4f96-9d56-1ce0324e4913" (UID: "6077b63e-53a2-4f96-9d56-1ce0324e4913"). InnerVolumeSpecName "kube-api-access-zth6t". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.012551 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" (UID: "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.012866 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-tmp" (OuterVolumeSpecName: "tmp") pod "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" (UID: "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.013099 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42a11a02-47e1-488f-b270-2679d3298b0e-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "42a11a02-47e1-488f-b270-2679d3298b0e" (UID: "42a11a02-47e1-488f-b270-2679d3298b0e"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.013256 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-config" (OuterVolumeSpecName: "config") pod "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" (UID: "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.013451 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-kube-api-access-dztfv" (OuterVolumeSpecName: "kube-api-access-dztfv") pod "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" (UID: "d45be74c-0d98-4d18-90e4-f7ef1b6daaf7"). 
InnerVolumeSpecName "kube-api-access-dztfv". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.013578 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/736c54fe-349c-4bb9-870a-d1c1d1c03831-tmp" (OuterVolumeSpecName: "tmp") pod "736c54fe-349c-4bb9-870a-d1c1d1c03831" (UID: "736c54fe-349c-4bb9-870a-d1c1d1c03831"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.013681 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-utilities" (OuterVolumeSpecName: "utilities") pod "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" (UID: "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.013817 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.013959 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "81e39f7b-62e4-4fc9-992a-6535ce127a02" (UID: "81e39f7b-62e4-4fc9-992a-6535ce127a02"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.013934 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-kube-api-access-ks6v2" (OuterVolumeSpecName: "kube-api-access-ks6v2") pod "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" (UID: "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a"). InnerVolumeSpecName "kube-api-access-ks6v2". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.014023 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01080b46-74f1-4191-8755-5152a57b3b25-kube-api-access-w94wk" (OuterVolumeSpecName: "kube-api-access-w94wk") pod "01080b46-74f1-4191-8755-5152a57b3b25" (UID: "01080b46-74f1-4191-8755-5152a57b3b25"). InnerVolumeSpecName "kube-api-access-w94wk". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.014128 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-utilities" (OuterVolumeSpecName: "utilities") pod "b605f283-6f2e-42da-a838-54421690f7d0" (UID: "b605f283-6f2e-42da-a838-54421690f7d0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.014358 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/301e1965-1754-483d-b6cc-bfae7038bbca-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "301e1965-1754-483d-b6cc-bfae7038bbca" (UID: "301e1965-1754-483d-b6cc-bfae7038bbca"). InnerVolumeSpecName "tmpfs". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.014463 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-config" (OuterVolumeSpecName: "config") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.014471 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "18f80adb-c1c3-49ba-8ee4-932c851d3897" (UID: "18f80adb-c1c3-49ba-8ee4-932c851d3897"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.014602 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "584e1f4a-8205-47d7-8efb-3afc6017c4c9" (UID: "584e1f4a-8205-47d7-8efb-3afc6017c4c9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.014607 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92dfbade-90b6-4169-8c07-72cff7f2c82b-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "92dfbade-90b6-4169-8c07-72cff7f2c82b" (UID: "92dfbade-90b6-4169-8c07-72cff7f2c82b"). InnerVolumeSpecName "tmp-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.014706 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-kube-api-access-q4smf" (OuterVolumeSpecName: "kube-api-access-q4smf") pod "0dd0fbac-8c0d-4228-8faa-abbeedabf7db" (UID: "0dd0fbac-8c0d-4228-8faa-abbeedabf7db"). InnerVolumeSpecName "kube-api-access-q4smf". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.014805 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "fc8db2c7-859d-47b3-a900-2bd0c0b2973b" (UID: "fc8db2c7-859d-47b3-a900-2bd0c0b2973b"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.014849 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "301e1965-1754-483d-b6cc-bfae7038bbca" (UID: "301e1965-1754-483d-b6cc-bfae7038bbca"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.015071 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92dfbade-90b6-4169-8c07-72cff7f2c82b-kube-api-access-4g8ts" (OuterVolumeSpecName: "kube-api-access-4g8ts") pod "92dfbade-90b6-4169-8c07-72cff7f2c82b" (UID: "92dfbade-90b6-4169-8c07-72cff7f2c82b"). InnerVolumeSpecName "kube-api-access-4g8ts". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.015197 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ee8fbd3-1f81-4666-96da-5afc70819f1a-kube-api-access-d4tqq" (OuterVolumeSpecName: "kube-api-access-d4tqq") pod "6ee8fbd3-1f81-4666-96da-5afc70819f1a" (UID: "6ee8fbd3-1f81-4666-96da-5afc70819f1a"). InnerVolumeSpecName "kube-api-access-d4tqq". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.015270 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-config" (OuterVolumeSpecName: "config") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.015798 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7e8f42f-dc0e-424b-bb56-5ec849834888-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d7e8f42f-dc0e-424b-bb56-5ec849834888" (UID: "d7e8f42f-dc0e-424b-bb56-5ec849834888"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.015803 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.015747 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-cert" (OuterVolumeSpecName: "cert") pod "a52afe44-fb37-46ed-a1f8-bf39727a3cbe" (UID: "a52afe44-fb37-46ed-a1f8-bf39727a3cbe"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016026 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010992 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-image-registry-operator-tls\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016114 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016119 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-config" (OuterVolumeSpecName: "config") pod "fc8db2c7-859d-47b3-a900-2bd0c0b2973b" (UID: "fc8db2c7-859d-47b3-a900-2bd0c0b2973b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016120 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/16bdd140-dce1-464c-ab47-dd5798d1d256-available-featuregates\") pod \"16bdd140-dce1-464c-ab47-dd5798d1d256\" (UID: \"16bdd140-dce1-464c-ab47-dd5798d1d256\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016149 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016150 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7afa918d-be67-40a6-803c-d3b0ae99d815-config" (OuterVolumeSpecName: "config") pod "7afa918d-be67-40a6-803c-d3b0ae99d815" (UID: "7afa918d-be67-40a6-803c-d3b0ae99d815"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016173 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-etcd-client\") pod \"567683bd-0efc-4f21-b076-e28559628404\" (UID: \"567683bd-0efc-4f21-b076-e28559628404\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016208 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-config\") pod \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\" (UID: \"6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016401 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-srv-cert\") pod \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\" (UID: \"7fcc6409-8a0f-44c3-89e7-5aecd7610f8a\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016431 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a555ff2e-0be6-46d5-897d-863bb92ae2b3-tmp\") pod \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\" (UID: \"a555ff2e-0be6-46d5-897d-863bb92ae2b3\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016472 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-utilities\") pod \"94a6e063-3d1a-4d44-875d-185291448c31\" (UID: \"94a6e063-3d1a-4d44-875d-185291448c31\") " Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016558 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7cps\" (UniqueName: \"kubernetes.io/projected/af41de71-79cf-4590-bbe9-9e8b848862cb-kube-api-access-d7cps\") pod \"af41de71-79cf-4590-bbe9-9e8b848862cb\" (UID: \"af41de71-79cf-4590-bbe9-9e8b848862cb\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016590 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09cfa50b-4138-4585-a53e-64dd3ab73335-serving-cert\") pod \"09cfa50b-4138-4585-a53e-64dd3ab73335\" (UID: \"09cfa50b-4138-4585-a53e-64dd3ab73335\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016615 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfzkj\" (UniqueName: \"kubernetes.io/projected/0effdbcf-dd7d-404d-9d48-77536d665a5d-kube-api-access-mfzkj\") pod \"0effdbcf-dd7d-404d-9d48-77536d665a5d\" (UID: \"0effdbcf-dd7d-404d-9d48-77536d665a5d\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016643 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-serving-cert\") pod \"d19cb085-0c5b-4810-b654-ce7923221d90\" (UID: \"d19cb085-0c5b-4810-b654-ce7923221d90\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016670 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-default-certificate\") pod \"18f80adb-c1c3-49ba-8ee4-932c851d3897\" (UID: \"18f80adb-c1c3-49ba-8ee4-932c851d3897\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016699 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-cliconfig\") pod \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\" (UID: \"6edfcf45-925b-4eff-b940-95b6fc0b85d4\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016728 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b4750666-1362-4001-abd0-6f89964cc621-proxy-tls\") pod \"b4750666-1362-4001-abd0-6f89964cc621\" (UID: \"b4750666-1362-4001-abd0-6f89964cc621\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016754 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-key\") pod \"ce090a97-9ab6-4c40-a719-64ff2acd9778\" (UID: \"ce090a97-9ab6-4c40-a719-64ff2acd9778\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016780 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5rsr\" (UniqueName: \"kubernetes.io/projected/af33e427-6803-48c2-a76a-dd9deb7cbf9a-kube-api-access-z5rsr\") pod \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\" (UID: \"af33e427-6803-48c2-a76a-dd9deb7cbf9a\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016807 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-utilities\") pod \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\" (UID: \"9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016833 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f65c0ac1-8bca-454d-a2e6-e35cb418beac-config\") pod \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\" (UID: \"f65c0ac1-8bca-454d-a2e6-e35cb418beac\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016860 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26xrl\" (UniqueName: \"kubernetes.io/projected/a208c9c2-333b-4b4a-be0d-bc32ec38a821-kube-api-access-26xrl\") pod \"a208c9c2-333b-4b4a-be0d-bc32ec38a821\" (UID: \"a208c9c2-333b-4b4a-be0d-bc32ec38a821\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016891 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-utilities\") pod \"31fa8943-81cc-4750-a0b7-0fa9ab5af883\" (UID: \"31fa8943-81cc-4750-a0b7-0fa9ab5af883\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016918 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-trusted-ca\") pod \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\" (UID: \"20ce4d18-fe25-4696-ad7c-1bd2d6200a3e\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016944 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hckvg\" (UniqueName: \"kubernetes.io/projected/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-kube-api-access-hckvg\") pod \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\" (UID: \"fc8db2c7-859d-47b3-a900-2bd0c0b2973b\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016968 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-utilities\") pod \"cc85e424-18b2-4924-920b-bd291a8c4b01\" (UID: \"cc85e424-18b2-4924-920b-bd291a8c4b01\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016998 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6dmhf\" (UniqueName: \"kubernetes.io/projected/736c54fe-349c-4bb9-870a-d1c1d1c03831-kube-api-access-6dmhf\") pod \"736c54fe-349c-4bb9-870a-d1c1d1c03831\" (UID: \"736c54fe-349c-4bb9-870a-d1c1d1c03831\") "
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017111 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-system-cni-dir\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017144 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"whereabouts-flatfile-configmap\" (UniqueName: \"kubernetes.io/configmap/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-whereabouts-flatfile-configmap\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017190 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6xzs\" (UniqueName: \"kubernetes.io/projected/27d4d422-313b-48d2-b7ec-7e914beaac62-kube-api-access-g6xzs\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017220 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-daemon-config\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017253 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqw79\" (UniqueName: \"kubernetes.io/projected/f4c38be5-b405-4caf-9ae9-e93c7ca572b1-kube-api-access-gqw79\") pod \"node-ca-4q767\" (UID: \"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\") " pod="openshift-image-registry/node-ca-4q767"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017280 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-env-overrides\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017312 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017339 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-tuning-conf-dir\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017364 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-systemd-units\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017386 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-var-lib-openvswitch\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017411 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-ovn\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017458 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-lldsn\" (UniqueName: \"kubernetes.io/projected/d655d34c-2969-43f2-8e93-455507c7cfda-kube-api-access-lldsn\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017482 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6740364c-f52c-49d7-9841-823aa6f3894b-ovn-node-metrics-cert\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017570 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-kubelet\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017597 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-env-overrides\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017631 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/34177974-8d82-49d2-a763-391d0df3bbd8-host-etc-kube\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017657 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-hostroot\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017685 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-g27c2\" (UniqueName: \"kubernetes.io/projected/d9239213-5213-4f95-9acf-9d99c18c3f5a-kube-api-access-g27c2\") pod \"node-resolver-67v4x\" (UID: \"d9239213-5213-4f95-9acf-9d99c18c3f5a\") " pod="openshift-dns/node-resolver-67v4x"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017710 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-openvswitch\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017735 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-proxy-tls\") pod \"machine-config-daemon-gxph5\" (UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017765 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-cnibin\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017788 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-run-k8s-cni-cncf-io\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017811 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-run-netns\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017836 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-conf-dir\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017857 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-node-log\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017881 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-rootfs\") pod \"machine-config-daemon-gxph5\" (UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017907 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-ovnkube-config\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017936 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-cni-binary-copy\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62"
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017960 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-socket-dir-parent\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62"
started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-socket-dir-parent\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017985 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-etc-kubernetes\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018010 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-run-multus-certs\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018044 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-var-lib-cni-bin\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018072 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-58vql\" (UniqueName: \"kubernetes.io/projected/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-kube-api-access-58vql\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018098 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d9239213-5213-4f95-9acf-9d99c18c3f5a-hosts-file\") pod \"node-resolver-67v4x\" (UID: \"d9239213-5213-4f95-9acf-9d99c18c3f5a\") " pod="openshift-dns/node-resolver-67v4x" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018122 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f4c38be5-b405-4caf-9ae9-e93c7ca572b1-host\") pod \"node-ca-4q767\" (UID: \"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\") " pod="openshift-image-registry/node-ca-4q767" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018158 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-os-release\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018182 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-systemd\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018210 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018242 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-var-lib-kubelet\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018271 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-system-cni-dir\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018299 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2f6lr\" (UniqueName: \"kubernetes.io/projected/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-kube-api-access-2f6lr\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018325 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-cnibin\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018352 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d9239213-5213-4f95-9acf-9d99c18c3f5a-tmp-dir\") pod \"node-resolver-67v4x\" (UID: \"d9239213-5213-4f95-9acf-9d99c18c3f5a\") " pod="openshift-dns/node-resolver-67v4x" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018378 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-script-lib\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018401 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-mcd-auth-proxy-config\") pod \"machine-config-daemon-gxph5\" (UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018442 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-cni-dir\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018470 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-var-lib-cni-multus\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018497 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-ovn-kubernetes\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018529 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-config\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018597 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hczl\" (UniqueName: \"kubernetes.io/projected/6740364c-f52c-49d7-9841-823aa6f3894b-kube-api-access-2hczl\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018641 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-host-slash\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018665 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018770 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-netns\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018790 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-etc-openvswitch\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018820 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f4c38be5-b405-4caf-9ae9-e93c7ca572b1-serviceca\") pod \"node-ca-4q767\" (UID: \"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\") " pod="openshift-image-registry/node-ca-4q767" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018844 5113 operation_generator.go:615] "MountVolume.SetUp 
succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/428b39f5-eb1c-4f65-b7a4-eeb6e84860cc-host-slash\") pod \"iptables-alerter-5jnd7\" (UID: \"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\") " pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018865 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-os-release\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018953 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-os-release\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.019031 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/34177974-8d82-49d2-a763-391d0df3bbd8-host-etc-kube\") pod \"network-operator-7bdcf4f5bd-7fjxv\" (UID: \"34177974-8d82-49d2-a763-391d0df3bbd8\") " pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.019070 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-hostroot\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.019298 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-cnibin\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.019339 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-run-k8s-cni-cncf-io\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.019372 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-run-netns\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.019404 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-conf-dir\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016173 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "fc8db2c7-859d-47b3-a900-2bd0c0b2973b" (UID: 
"fc8db2c7-859d-47b3-a900-2bd0c0b2973b"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016189 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.015829 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ebfebf6-3ecd-458e-943f-bb25b52e2718-kube-api-access-l87hs" (OuterVolumeSpecName: "kube-api-access-l87hs") pod "5ebfebf6-3ecd-458e-943f-bb25b52e2718" (UID: "5ebfebf6-3ecd-458e-943f-bb25b52e2718"). InnerVolumeSpecName "kube-api-access-l87hs". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016484 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-kube-api-access-9z4sw" (OuterVolumeSpecName: "kube-api-access-9z4sw") pod "e1d2a42d-af1d-4054-9618-ab545e0ed8b7" (UID: "e1d2a42d-af1d-4054-9618-ab545e0ed8b7"). InnerVolumeSpecName "kube-api-access-9z4sw". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.016700 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-kube-api-access-rzt4w" (OuterVolumeSpecName: "kube-api-access-rzt4w") pod "a52afe44-fb37-46ed-a1f8-bf39727a3cbe" (UID: "a52afe44-fb37-46ed-a1f8-bf39727a3cbe"). InnerVolumeSpecName "kube-api-access-rzt4w". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.019455 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/149b3c48-e17c-4a66-a835-d86dabf6ff13-kube-api-access-wj4qr" (OuterVolumeSpecName: "kube-api-access-wj4qr") pod "149b3c48-e17c-4a66-a835-d86dabf6ff13" (UID: "149b3c48-e17c-4a66-a835-d86dabf6ff13"). InnerVolumeSpecName "kube-api-access-wj4qr". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017075 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-kube-api-access-pgx6b" (OuterVolumeSpecName: "kube-api-access-pgx6b") pod "f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4" (UID: "f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4"). InnerVolumeSpecName "kube-api-access-pgx6b". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017368 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c491984c-7d4b-44aa-8c1e-d7974424fa47-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "c491984c-7d4b-44aa-8c1e-d7974424fa47" (UID: "c491984c-7d4b-44aa-8c1e-d7974424fa47"). InnerVolumeSpecName "machine-api-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017403 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-config" (OuterVolumeSpecName: "config") pod "736c54fe-349c-4bb9-870a-d1c1d1c03831" (UID: "736c54fe-349c-4bb9-870a-d1c1d1c03831"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.017726 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "7df94c10-441d-4386-93a6-6730fb7bcde0" (UID: "7df94c10-441d-4386-93a6-6730fb7bcde0"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018057 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6edfcf45-925b-4eff-b940-95b6fc0b85d4-kube-api-access-8nb9c" (OuterVolumeSpecName: "kube-api-access-8nb9c") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "kube-api-access-8nb9c". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018125 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-config" (OuterVolumeSpecName: "config") pod "2325ffef-9d5b-447f-b00e-3efc429acefe" (UID: "2325ffef-9d5b-447f-b00e-3efc429acefe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018349 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018442 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "7df94c10-441d-4386-93a6-6730fb7bcde0" (UID: "7df94c10-441d-4386-93a6-6730fb7bcde0"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018465 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" (UID: "b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.018494 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/92dfbade-90b6-4169-8c07-72cff7f2c82b-config-volume" (OuterVolumeSpecName: "config-volume") pod "92dfbade-90b6-4169-8c07-72cff7f2c82b" (UID: "92dfbade-90b6-4169-8c07-72cff7f2c82b"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.019404 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.019477 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020171 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-var-lib-kubelet\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020268 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-socket-dir-parent\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020307 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-etc-kubernetes\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020337 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-log-socket\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020363 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-var-lib-cni-bin\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020384 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-bin\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020420 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9b8kv\" (UniqueName: \"kubernetes.io/projected/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-kube-api-access-9b8kv\") pod \"machine-config-daemon-gxph5\" 
(UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020499 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-cni-binary-copy\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020554 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-slash\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020584 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-netd\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020614 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/27d4d422-313b-48d2-b7ec-7e914beaac62-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020870 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-os-release\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020908 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c5f2bfad-70f6-4185-a3d9-81ce12720767-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020917 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-system-cni-dir\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.020930 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f65c0ac1-8bca-454d-a2e6-e35cb418beac-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021010 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-run-multus-certs\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021059 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/f65c0ac1-8bca-454d-a2e6-e35cb418beac-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021092 5113 reconciler_common.go:299] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021111 5113 reconciler_common.go:299] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021122 5113 reconciler_common.go:299] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021158 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021171 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-pllx6\" (UniqueName: \"kubernetes.io/projected/81e39f7b-62e4-4fc9-992a-6535ce127a02-kube-api-access-pllx6\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021181 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021192 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zth6t\" (UniqueName: \"kubernetes.io/projected/6077b63e-53a2-4f96-9d56-1ce0324e4913-kube-api-access-zth6t\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021203 5113 reconciler_common.go:299] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021213 5113 reconciler_common.go:299] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021224 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021234 5113 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/92dfbade-90b6-4169-8c07-72cff7f2c82b-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021244 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021286 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: 
\"kubernetes.io/configmap/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021436 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5f2bfad-70f6-4185-a3d9-81ce12720767-config" (OuterVolumeSpecName: "config") pod "c5f2bfad-70f6-4185-a3d9-81ce12720767" (UID: "c5f2bfad-70f6-4185-a3d9-81ce12720767"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021627 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d565531a-ff86-4608-9d19-767de01ac31b-kube-api-access-99zj9" (OuterVolumeSpecName: "kube-api-access-99zj9") pod "d565531a-ff86-4608-9d19-767de01ac31b" (UID: "d565531a-ff86-4608-9d19-767de01ac31b"). InnerVolumeSpecName "kube-api-access-99zj9". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021657 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-cnibin\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021701 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/d9239213-5213-4f95-9acf-9d99c18c3f5a-hosts-file\") pod \"node-resolver-67v4x\" (UID: \"d9239213-5213-4f95-9acf-9d99c18c3f5a\") " pod="openshift-dns/node-resolver-67v4x" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.021995 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-utilities" (OuterVolumeSpecName: "utilities") pod "31fa8943-81cc-4750-a0b7-0fa9ab5af883" (UID: "31fa8943-81cc-4750-a0b7-0fa9ab5af883"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022054 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "81e39f7b-62e4-4fc9-992a-6535ce127a02" (UID: "81e39f7b-62e4-4fc9-992a-6535ce127a02"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022369 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d9239213-5213-4f95-9acf-9d99c18c3f5a-tmp-dir\") pod \"node-resolver-67v4x\" (UID: \"d9239213-5213-4f95-9acf-9d99c18c3f5a\") " pod="openshift-dns/node-resolver-67v4x" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022372 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18f80adb-c1c3-49ba-8ee4-932c851d3897-kube-api-access-wbmqg" (OuterVolumeSpecName: "kube-api-access-wbmqg") pod "18f80adb-c1c3-49ba-8ee4-932c851d3897" (UID: "18f80adb-c1c3-49ba-8ee4-932c851d3897"). InnerVolumeSpecName "kube-api-access-wbmqg". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022485 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-cni-dir\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022627 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-host-var-lib-cni-multus\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022780 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-sbc2l\" (UniqueName: \"kubernetes.io/projected/593a3561-7760-45c5-8f91-5aaef7475d0f-kube-api-access-sbc2l\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022809 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16bdd140-dce1-464c-ab47-dd5798d1d256-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022819 5113 reconciler_common.go:299] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022832 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/584e1f4a-8205-47d7-8efb-3afc6017c4c9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022846 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/736c54fe-349c-4bb9-870a-d1c1d1c03831-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022862 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022878 5113 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/6077b63e-53a2-4f96-9d56-1ce0324e4913-tmp-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022888 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2325ffef-9d5b-447f-b00e-3efc429acefe-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022898 5113 reconciler_common.go:299] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022908 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022918 5113 reconciler_common.go:299] "Volume detached for 
volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/42a11a02-47e1-488f-b270-2679d3298b0e-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022931 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-dztfv\" (UniqueName: \"kubernetes.io/projected/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-kube-api-access-dztfv\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022942 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ks6v2\" (UniqueName: \"kubernetes.io/projected/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-kube-api-access-ks6v2\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.022984 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023183 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d7e8f42f-dc0e-424b-bb56-5ec849834888-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023196 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023206 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-7jjkz\" (UniqueName: \"kubernetes.io/projected/301e1965-1754-483d-b6cc-bfae7038bbca-kube-api-access-7jjkz\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023245 5113 reconciler_common.go:299] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023255 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8nspp\" (UniqueName: \"kubernetes.io/projected/a7a88189-c967-4640-879e-27665747f20c-kube-api-access-8nspp\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023267 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-qqbfk\" (UniqueName: \"kubernetes.io/projected/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-kube-api-access-qqbfk\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023281 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7afa918d-be67-40a6-803c-d3b0ae99d815-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023293 5113 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023307 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023295 5113 
operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18f80adb-c1c3-49ba-8ee4-932c851d3897-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "18f80adb-c1c3-49ba-8ee4-932c851d3897" (UID: "18f80adb-c1c3-49ba-8ee4-932c851d3897"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023320 5113 reconciler_common.go:299] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023354 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/736c54fe-349c-4bb9-870a-d1c1d1c03831-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023366 5113 reconciler_common.go:299] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d565531a-ff86-4608-9d19-767de01ac31b-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023398 5113 reconciler_common.go:299] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023415 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023317 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "18f80adb-c1c3-49ba-8ee4-932c851d3897" (UID: "18f80adb-c1c3-49ba-8ee4-932c851d3897"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023427 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023462 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-9z4sw\" (UniqueName: \"kubernetes.io/projected/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-kube-api-access-9z4sw\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023490 5113 reconciler_common.go:299] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/c491984c-7d4b-44aa-8c1e-d7974424fa47-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023508 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8nb9c\" (UniqueName: \"kubernetes.io/projected/6edfcf45-925b-4eff-b940-95b6fc0b85d4-kube-api-access-8nb9c\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023563 5113 reconciler_common.go:299] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023579 5113 reconciler_common.go:299] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023595 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7599e0b6-bddf-4def-b7f2-0b32206e8651-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023609 5113 reconciler_common.go:299] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-images\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023624 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-94l9h\" (UniqueName: \"kubernetes.io/projected/16bdd140-dce1-464c-ab47-dd5798d1d256-kube-api-access-94l9h\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023637 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023650 5113 reconciler_common.go:299] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023663 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ptkcf\" (UniqueName: \"kubernetes.io/projected/7599e0b6-bddf-4def-b7f2-0b32206e8651-kube-api-access-ptkcf\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023676 5113 reconciler_common.go:299] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/d7e8f42f-dc0e-424b-bb56-5ec849834888-service-ca\") on node \"crc\" 
DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023689 5113 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023702 5113 reconciler_common.go:299] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023716 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7afa918d-be67-40a6-803c-d3b0ae99d815-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023730 5113 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/f65c0ac1-8bca-454d-a2e6-e35cb418beac-tmp-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023744 5113 reconciler_common.go:299] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-images\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023762 5113 reconciler_common.go:299] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/6ee8fbd3-1f81-4666-96da-5afc70819f1a-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023760 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-cni-binary-copy\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023776 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023789 5113 reconciler_common.go:299] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023805 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01080b46-74f1-4191-8755-5152a57b3b25-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023818 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023834 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ws8zz\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-kube-api-access-ws8zz\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023887 5113 reconciler_common.go:299] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-serving-ca\") on node 
\"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023900 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ddlk9\" (UniqueName: \"kubernetes.io/projected/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-kube-api-access-ddlk9\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023912 5113 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/92dfbade-90b6-4169-8c07-72cff7f2c82b-tmp-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023926 5113 reconciler_common.go:299] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023939 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023952 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023965 5113 reconciler_common.go:299] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023978 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-l87hs\" (UniqueName: \"kubernetes.io/projected/5ebfebf6-3ecd-458e-943f-bb25b52e2718-kube-api-access-l87hs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.023992 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-pgx6b\" (UniqueName: \"kubernetes.io/projected/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-kube-api-access-pgx6b\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024005 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-pddnv\" (UniqueName: \"kubernetes.io/projected/e093be35-bb62-4843-b2e8-094545761610-kube-api-access-pddnv\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024020 5113 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/567683bd-0efc-4f21-b076-e28559628404-tmp-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024032 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-6g4lr\" (UniqueName: \"kubernetes.io/projected/f7e2c886-118e-43bb-bef1-c78134de392b-kube-api-access-6g4lr\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024045 5113 reconciler_common.go:299] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024058 5113 reconciler_common.go:299] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 30 
00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024071 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-4g8ts\" (UniqueName: \"kubernetes.io/projected/92dfbade-90b6-4169-8c07-72cff7f2c82b-kube-api-access-4g8ts\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024084 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7afa918d-be67-40a6-803c-d3b0ae99d815-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024123 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c491984c-7d4b-44aa-8c1e-d7974424fa47-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024137 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a555ff2e-0be6-46d5-897d-863bb92ae2b3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024151 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01080b46-74f1-4191-8755-5152a57b3b25-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024164 5113 reconciler_common.go:299] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024201 5113 reconciler_common.go:299] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024216 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-q4smf\" (UniqueName: \"kubernetes.io/projected/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-kube-api-access-q4smf\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024230 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-qgrkj\" (UniqueName: \"kubernetes.io/projected/42a11a02-47e1-488f-b270-2679d3298b0e-kube-api-access-qgrkj\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024270 5113 reconciler_common.go:299] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/301e1965-1754-483d-b6cc-bfae7038bbca-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024284 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-wj4qr\" (UniqueName: \"kubernetes.io/projected/149b3c48-e17c-4a66-a835-d86dabf6ff13-kube-api-access-wj4qr\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024297 5113 reconciler_common.go:299] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/301e1965-1754-483d-b6cc-bfae7038bbca-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024313 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-4hb7m\" (UniqueName: \"kubernetes.io/projected/94a6e063-3d1a-4d44-875d-185291448c31-kube-api-access-4hb7m\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024326 5113 
reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024340 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-9vsz9\" (UniqueName: \"kubernetes.io/projected/c491984c-7d4b-44aa-8c1e-d7974424fa47-kube-api-access-9vsz9\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024354 5113 reconciler_common.go:299] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024368 5113 reconciler_common.go:299] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/567683bd-0efc-4f21-b076-e28559628404-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024380 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/567683bd-0efc-4f21-b076-e28559628404-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024394 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-m5lgh\" (UniqueName: \"kubernetes.io/projected/d19cb085-0c5b-4810-b654-ce7923221d90-kube-api-access-m5lgh\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024407 5113 reconciler_common.go:299] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024421 5113 reconciler_common.go:299] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024433 5113 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/f7e2c886-118e-43bb-bef1-c78134de392b-tmp-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024446 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-w94wk\" (UniqueName: \"kubernetes.io/projected/01080b46-74f1-4191-8755-5152a57b3b25-kube-api-access-w94wk\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024460 5113 reconciler_common.go:299] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024473 5113 reconciler_common.go:299] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024486 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-tknt7\" (UniqueName: \"kubernetes.io/projected/584e1f4a-8205-47d7-8efb-3afc6017c4c9-kube-api-access-tknt7\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024499 5113 
reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024513 5113 reconciler_common.go:299] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2325ffef-9d5b-447f-b00e-3efc429acefe-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024543 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024556 5113 reconciler_common.go:299] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a208c9c2-333b-4b4a-be0d-bc32ec38a821-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024569 5113 reconciler_common.go:299] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7df94c10-441d-4386-93a6-6730fb7bcde0-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024583 5113 reconciler_common.go:299] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0dd0fbac-8c0d-4228-8faa-abbeedabf7db-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024598 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-d4tqq\" (UniqueName: \"kubernetes.io/projected/6ee8fbd3-1f81-4666-96da-5afc70819f1a-kube-api-access-d4tqq\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024612 5113 reconciler_common.go:299] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024625 5113 reconciler_common.go:299] "Volume detached for volume \"ca-trust-extracted-pem\" (UniqueName: \"kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-ca-trust-extracted-pem\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024638 5113 reconciler_common.go:299] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024650 5113 reconciler_common.go:299] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9f71a554-e414-4bc3-96d2-674060397afe-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024663 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024677 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ftwb6\" (UniqueName: \"kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-kube-api-access-ftwb6\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024690 5113 
reconciler_common.go:299] "Volume detached for volume \"kube-api-access-rzt4w\" (UniqueName: \"kubernetes.io/projected/a52afe44-fb37-46ed-a1f8-bf39727a3cbe-kube-api-access-rzt4w\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024703 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-nmmzf\" (UniqueName: \"kubernetes.io/projected/7df94c10-441d-4386-93a6-6730fb7bcde0-kube-api-access-nmmzf\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024716 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-xxfcv\" (UniqueName: \"kubernetes.io/projected/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-kube-api-access-xxfcv\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024729 5113 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/c5f2bfad-70f6-4185-a3d9-81ce12720767-tmp-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024743 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/736c54fe-349c-4bb9-870a-d1c1d1c03831-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024758 5113 reconciler_common.go:299] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024771 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09cfa50b-4138-4585-a53e-64dd3ab73335-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024806 5113 reconciler_common.go:299] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024838 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-hm9x7\" (UniqueName: \"kubernetes.io/projected/f559dfa3-3917-43a2-97f6-61ddfda10e93-kube-api-access-hm9x7\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024857 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b605f283-6f2e-42da-a838-54421690f7d0-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024871 5113 reconciler_common.go:299] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024885 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.024899 5113 reconciler_common.go:299] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7df94c10-441d-4386-93a6-6730fb7bcde0-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 
00:11:21.024003 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-kube-api-access-l9stx" (OuterVolumeSpecName: "kube-api-access-l9stx") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "kube-api-access-l9stx". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.025245 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-audit" (OuterVolumeSpecName: "audit") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.025243 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16bdd140-dce1-464c-ab47-dd5798d1d256-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "16bdd140-dce1-464c-ab47-dd5798d1d256" (UID: "16bdd140-dce1-464c-ab47-dd5798d1d256"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.026156 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7a88189-c967-4640-879e-27665747f20c-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "a7a88189-c967-4640-879e-27665747f20c" (UID: "a7a88189-c967-4640-879e-27665747f20c"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.026306 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-utilities" (OuterVolumeSpecName: "utilities") pod "149b3c48-e17c-4a66-a835-d86dabf6ff13" (UID: "149b3c48-e17c-4a66-a835-d86dabf6ff13"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.026587 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-mbd62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2f6lr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\"
,\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mbd62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.027391 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-whereabouts-flatfile-configmap" (OuterVolumeSpecName: "whereabouts-flatfile-configmap") pod "869851b9-7ffb-4af0-b166-1d8aa40a5f80" (UID: "869851b9-7ffb-4af0-b166-1d8aa40a5f80"). InnerVolumeSpecName "whereabouts-flatfile-configmap". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.027419 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ebfebf6-3ecd-458e-943f-bb25b52e2718-serviceca" (OuterVolumeSpecName: "serviceca") pod "5ebfebf6-3ecd-458e-943f-bb25b52e2718" (UID: "5ebfebf6-3ecd-458e-943f-bb25b52e2718"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.027988 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.028590 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af41de71-79cf-4590-bbe9-9e8b848862cb-kube-api-access-d7cps" (OuterVolumeSpecName: "kube-api-access-d7cps") pod "af41de71-79cf-4590-bbe9-9e8b848862cb" (UID: "af41de71-79cf-4590-bbe9-9e8b848862cb"). InnerVolumeSpecName "kube-api-access-d7cps". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.028603 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-key" (OuterVolumeSpecName: "signing-key") pod "ce090a97-9ab6-4c40-a719-64ff2acd9778" (UID: "ce090a97-9ab6-4c40-a719-64ff2acd9778"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.029021 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.029144 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-utilities" (OuterVolumeSpecName: "utilities") pod "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" (UID: "9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.029219 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cc85e424-18b2-4924-920b-bd291a8c4b01" (UID: "cc85e424-18b2-4924-920b-bd291a8c4b01"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.029687 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/736c54fe-349c-4bb9-870a-d1c1d1c03831-kube-api-access-6dmhf" (OuterVolumeSpecName: "kube-api-access-6dmhf") pod "736c54fe-349c-4bb9-870a-d1c1d1c03831" (UID: "736c54fe-349c-4bb9-870a-d1c1d1c03831"). InnerVolumeSpecName "kube-api-access-6dmhf". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.029691 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-cni-binary-copy\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.029768 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-system-cni-dir\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.030338 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "869851b9-7ffb-4af0-b166-1d8aa40a5f80" (UID: "869851b9-7ffb-4af0-b166-1d8aa40a5f80"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.030468 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"whereabouts-flatfile-configmap\" (UniqueName: \"kubernetes.io/configmap/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-whereabouts-flatfile-configmap\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.030477 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0effdbcf-dd7d-404d-9d48-77536d665a5d-kube-api-access-mfzkj" (OuterVolumeSpecName: "kube-api-access-mfzkj") pod "0effdbcf-dd7d-404d-9d48-77536d665a5d" (UID: "0effdbcf-dd7d-404d-9d48-77536d665a5d"). InnerVolumeSpecName "kube-api-access-mfzkj". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.030826 5113 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.030932 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs podName:d655d34c-2969-43f2-8e93-455507c7cfda nodeName:}" failed. No retries permitted until 2026-01-30 00:11:21.530900038 +0000 UTC m=+101.603505465 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs") pod "network-metrics-daemon-qx4gj" (UID: "d655d34c-2969-43f2-8e93-455507c7cfda") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.030939 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a208c9c2-333b-4b4a-be0d-bc32ec38a821-kube-api-access-26xrl" (OuterVolumeSpecName: "kube-api-access-26xrl") pod "a208c9c2-333b-4b4a-be0d-bc32ec38a821" (UID: "a208c9c2-333b-4b4a-be0d-bc32ec38a821"). InnerVolumeSpecName "kube-api-access-26xrl". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.031158 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-multus-daemon-config\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.031413 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "149b3c48-e17c-4a66-a835-d86dabf6ff13" (UID: "149b3c48-e17c-4a66-a835-d86dabf6ff13"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.031415 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-tuning-conf-dir\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.032094 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/869851b9-7ffb-4af0-b166-1d8aa40a5f80-kube-api-access-mjwtd" (OuterVolumeSpecName: "kube-api-access-mjwtd") pod "869851b9-7ffb-4af0-b166-1d8aa40a5f80" (UID: "869851b9-7ffb-4af0-b166-1d8aa40a5f80"). InnerVolumeSpecName "kube-api-access-mjwtd". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.032283 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.032331 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "31fa8943-81cc-4750-a0b7-0fa9ab5af883" (UID: "31fa8943-81cc-4750-a0b7-0fa9ab5af883"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.033045 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6077b63e-53a2-4f96-9d56-1ce0324e4913-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "6077b63e-53a2-4f96-9d56-1ce0324e4913" (UID: "6077b63e-53a2-4f96-9d56-1ce0324e4913"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.033195 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7afa918d-be67-40a6-803c-d3b0ae99d815-tmp" (OuterVolumeSpecName: "tmp") pod "7afa918d-be67-40a6-803c-d3b0ae99d815" (UID: "7afa918d-be67-40a6-803c-d3b0ae99d815"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.033255 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a555ff2e-0be6-46d5-897d-863bb92ae2b3-tmp" (OuterVolumeSpecName: "tmp") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.033473 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "e1d2a42d-af1d-4054-9618-ab545e0ed8b7" (UID: "e1d2a42d-af1d-4054-9618-ab545e0ed8b7"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.033713 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.033768 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/567683bd-0efc-4f21-b076-e28559628404-kube-api-access-m26jq" (OuterVolumeSpecName: "kube-api-access-m26jq") pod "567683bd-0efc-4f21-b076-e28559628404" (UID: "567683bd-0efc-4f21-b076-e28559628404"). InnerVolumeSpecName "kube-api-access-m26jq". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.033802 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "a7a88189-c967-4640-879e-27665747f20c" (UID: "a7a88189-c967-4640-879e-27665747f20c"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.033908 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:21.533883581 +0000 UTC m=+101.606488958 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.034112 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "d565531a-ff86-4608-9d19-767de01ac31b" (UID: "d565531a-ff86-4608-9d19-767de01ac31b"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.034378 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2325ffef-9d5b-447f-b00e-3efc429acefe-kube-api-access-zg8nc" (OuterVolumeSpecName: "kube-api-access-zg8nc") pod "2325ffef-9d5b-447f-b00e-3efc429acefe" (UID: "2325ffef-9d5b-447f-b00e-3efc429acefe"). InnerVolumeSpecName "kube-api-access-zg8nc". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.035432 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "18f80adb-c1c3-49ba-8ee4-932c851d3897" (UID: "18f80adb-c1c3-49ba-8ee4-932c851d3897"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.035700 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce090a97-9ab6-4c40-a719-64ff2acd9778-kube-api-access-xnxbn" (OuterVolumeSpecName: "kube-api-access-xnxbn") pod "ce090a97-9ab6-4c40-a719-64ff2acd9778" (UID: "ce090a97-9ab6-4c40-a719-64ff2acd9778"). InnerVolumeSpecName "kube-api-access-xnxbn". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.035922 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a555ff2e-0be6-46d5-897d-863bb92ae2b3-kube-api-access-8pskd" (OuterVolumeSpecName: "kube-api-access-8pskd") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "kube-api-access-8pskd". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.035996 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). 
InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.036153 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31fa8943-81cc-4750-a0b7-0fa9ab5af883-kube-api-access-grwfz" (OuterVolumeSpecName: "kube-api-access-grwfz") pod "31fa8943-81cc-4750-a0b7-0fa9ab5af883" (UID: "31fa8943-81cc-4750-a0b7-0fa9ab5af883"). InnerVolumeSpecName "kube-api-access-grwfz". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.036269 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-certs" (OuterVolumeSpecName: "certs") pod "593a3561-7760-45c5-8f91-5aaef7475d0f" (UID: "593a3561-7760-45c5-8f91-5aaef7475d0f"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.010263 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "9f71a554-e414-4bc3-96d2-674060397afe" (UID: "9f71a554-e414-4bc3-96d2-674060397afe"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.036734 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc85e424-18b2-4924-920b-bd291a8c4b01-kube-api-access-xfp5s" (OuterVolumeSpecName: "kube-api-access-xfp5s") pod "cc85e424-18b2-4924-920b-bd291a8c4b01" (UID: "cc85e424-18b2-4924-920b-bd291a8c4b01"). InnerVolumeSpecName "kube-api-access-xfp5s". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.036959 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "d19cb085-0c5b-4810-b654-ce7923221d90" (UID: "d19cb085-0c5b-4810-b654-ce7923221d90"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.037615 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.037763 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "af33e427-6803-48c2-a76a-dd9deb7cbf9a" (UID: "af33e427-6803-48c2-a76a-dd9deb7cbf9a"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.038189 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.038225 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.038238 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.038256 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.038272 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:21Z","lastTransitionTime":"2026-01-30T00:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.039665 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6efa070ceb93cc5fc2e76eab6d9c96ac3c4f8812085d0b6eb6e3f513b5bac782\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3454e762466e22e2a893650b9781823558bc6fdfda2aa4188aff3cb819014c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/etc/whereabouts/config\\\",\\\"name\\\":\\\"whereabouts-flatfile-configmap\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-t4r5k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.039790 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f6lr\" (UniqueName: \"kubernetes.io/projected/8ad95d7b-7c01-4672-8614-0cc8e52c0d79-kube-api-access-2f6lr\") pod \"multus-mbd62\" (UID: \"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\") " pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.039965 5113 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-dgvkt" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.040547 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.040835 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" (UID: "7fcc6409-8a0f-44c3-89e7-5aecd7610f8a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.040892 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e9b5059-1b3e-4067-a63d-2952cbe863af-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.040940 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.041473 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-g27c2\" (UniqueName: \"kubernetes.io/projected/d9239213-5213-4f95-9acf-9d99c18c3f5a-kube-api-access-g27c2\") pod \"node-resolver-67v4x\" (UID: \"d9239213-5213-4f95-9acf-9d99c18c3f5a\") " pod="openshift-dns/node-resolver-67v4x" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.041871 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f65c0ac1-8bca-454d-a2e6-e35cb418beac-config" (OuterVolumeSpecName: "config") pod "f65c0ac1-8bca-454d-a2e6-e35cb418beac" (UID: "f65c0ac1-8bca-454d-a2e6-e35cb418beac"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.042005 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-lldsn\" (UniqueName: \"kubernetes.io/projected/d655d34c-2969-43f2-8e93-455507c7cfda-kube-api-access-lldsn\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.044979 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.046920 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-58vql\" (UniqueName: \"kubernetes.io/projected/6ab4dd28-7902-4bc5-959f-47cd8cebb0c1-kube-api-access-58vql\") pod \"multus-additional-cni-plugins-t4r5k\" (UID: \"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\") " pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.048987 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-5jnd7" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.049662 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-dns/node-resolver-67v4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9239213-5213-4f95-9acf-9d99c18c3f5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g27c2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-67v4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.050449 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: 
"v4-0-config-user-idp-0-file-data") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.051338 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09cfa50b-4138-4585-a53e-64dd3ab73335-kube-api-access-zsb9b" (OuterVolumeSpecName: "kube-api-access-zsb9b") pod "09cfa50b-4138-4585-a53e-64dd3ab73335" (UID: "09cfa50b-4138-4585-a53e-64dd3ab73335"). InnerVolumeSpecName "kube-api-access-zsb9b". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.051450 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" (UID: "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.051858 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4750666-1362-4001-abd0-6f89964cc621-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "b4750666-1362-4001-abd0-6f89964cc621" (UID: "b4750666-1362-4001-abd0-6f89964cc621"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.052214 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09cfa50b-4138-4585-a53e-64dd3ab73335-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09cfa50b-4138-4585-a53e-64dd3ab73335" (UID: "09cfa50b-4138-4585-a53e-64dd3ab73335"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.052277 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92dfbade-90b6-4169-8c07-72cff7f2c82b-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "92dfbade-90b6-4169-8c07-72cff7f2c82b" (UID: "92dfbade-90b6-4169-8c07-72cff7f2c82b"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.052352 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-utilities" (OuterVolumeSpecName: "utilities") pod "94a6e063-3d1a-4d44-875d-185291448c31" (UID: "94a6e063-3d1a-4d44-875d-185291448c31"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.052108 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-kube-api-access-5lcfw" (OuterVolumeSpecName: "kube-api-access-5lcfw") pod "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" (UID: "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9"). InnerVolumeSpecName "kube-api-access-5lcfw". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.052620 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-kube-api-access-hckvg" (OuterVolumeSpecName: "kube-api-access-hckvg") pod "fc8db2c7-859d-47b3-a900-2bd0c0b2973b" (UID: "fc8db2c7-859d-47b3-a900-2bd0c0b2973b"). InnerVolumeSpecName "kube-api-access-hckvg". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.052723 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5f2bfad-70f6-4185-a3d9-81ce12720767-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c5f2bfad-70f6-4185-a3d9-81ce12720767" (UID: "c5f2bfad-70f6-4185-a3d9-81ce12720767"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.052745 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.052852 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-utilities" (OuterVolumeSpecName: "utilities") pod "cc85e424-18b2-4924-920b-bd291a8c4b01" (UID: "cc85e424-18b2-4924-920b-bd291a8c4b01"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.052926 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.052930 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" (UID: "dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.052817 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.053307 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4750666-1362-4001-abd0-6f89964cc621-kube-api-access-twvbl" (OuterVolumeSpecName: "kube-api-access-twvbl") pod "b4750666-1362-4001-abd0-6f89964cc621" (UID: "b4750666-1362-4001-abd0-6f89964cc621"). InnerVolumeSpecName "kube-api-access-twvbl". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.053358 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.053706 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "6edfcf45-925b-4eff-b940-95b6fc0b85d4" (UID: "6edfcf45-925b-4eff-b940-95b6fc0b85d4"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.054026 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4750666-1362-4001-abd0-6f89964cc621-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "b4750666-1362-4001-abd0-6f89964cc621" (UID: "b4750666-1362-4001-abd0-6f89964cc621"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.055468 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-service-ca" (OuterVolumeSpecName: "service-ca") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.055506 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f71a554-e414-4bc3-96d2-674060397afe-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9f71a554-e414-4bc3-96d2-674060397afe" (UID: "9f71a554-e414-4bc3-96d2-674060397afe"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.055818 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b605f283-6f2e-42da-a838-54421690f7d0-kube-api-access-6rmnv" (OuterVolumeSpecName: "kube-api-access-6rmnv") pod "b605f283-6f2e-42da-a838-54421690f7d0" (UID: "b605f283-6f2e-42da-a838-54421690f7d0"). InnerVolumeSpecName "kube-api-access-6rmnv". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.056012 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7e8f42f-dc0e-424b-bb56-5ec849834888-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d7e8f42f-dc0e-424b-bb56-5ec849834888" (UID: "d7e8f42f-dc0e-424b-bb56-5ec849834888"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.057268 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.057634 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "f559dfa3-3917-43a2-97f6-61ddfda10e93" (UID: "f559dfa3-3917-43a2-97f6-61ddfda10e93"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.057759 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af33e427-6803-48c2-a76a-dd9deb7cbf9a-kube-api-access-z5rsr" (OuterVolumeSpecName: "kube-api-access-z5rsr") pod "af33e427-6803-48c2-a76a-dd9deb7cbf9a" (UID: "af33e427-6803-48c2-a76a-dd9deb7cbf9a"). InnerVolumeSpecName "kube-api-access-z5rsr". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.057848 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7599e0b6-bddf-4def-b7f2-0b32206e8651-config" (OuterVolumeSpecName: "config") pod "7599e0b6-bddf-4def-b7f2-0b32206e8651" (UID: "7599e0b6-bddf-4def-b7f2-0b32206e8651"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.058006 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-config" (OuterVolumeSpecName: "console-config") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.058090 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "af33e427-6803-48c2-a76a-dd9deb7cbf9a" (UID: "af33e427-6803-48c2-a76a-dd9deb7cbf9a"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.060486 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-kube-api-access-tkdh6" (OuterVolumeSpecName: "kube-api-access-tkdh6") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). 
InnerVolumeSpecName "kube-api-access-tkdh6". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.060769 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" (UID: "20ce4d18-fe25-4696-ad7c-1bd2d6200a3e"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.063010 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9b8kv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9b8kv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gxph5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.063472 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" (UID: "6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.077203 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-config" (OuterVolumeSpecName: "config") pod "a555ff2e-0be6-46d5-897d-863bb92ae2b3" (UID: "a555ff2e-0be6-46d5-897d-863bb92ae2b3"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.077712 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4c6c3b9f-83fe-4921-bed0-c97e707ee433\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://fb5de81be22616fca47976a0d24ab6c6b330a5560704d7c9bd3f30816a6a53c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://6e418d2037fa46413cbb5c58dc73ecf2ecc6f110ebd3bfec9715e53ec0b6c855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]
},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://c01e6a834297ca754c755531034c6a4ded795a39c18f0f97cc0dc73214b2356a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://d32db858499f6acdd8dcfeec470facb33761e98b6974386bdf9e165a721026b8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.088515 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.088708 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-mbd62" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.094109 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" (UID: "71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.098596 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94a6e063-3d1a-4d44-875d-185291448c31" (UID: "94a6e063-3d1a-4d44-875d-185291448c31"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.099088 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f863fff9-286a-45fa-b8f0-8a86994b8440\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l7w75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5bb8f5cd97-xdvz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.104772 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e9b5059-1b3e-4067-a63d-2952cbe863af-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:11:21 crc kubenswrapper[5113]: W0130 00:11:21.106283 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ad95d7b_7c01_4672_8614_0cc8e52c0d79.slice/crio-59e41d3b22a01cde02a88eca51a000e6a552cfe80ebfb52c7f85bb41deef7467 WatchSource:0}: Error finding container 59e41d3b22a01cde02a88eca51a000e6a552cfe80ebfb52c7f85bb41deef7467: Status 404 returned error can't find the container with id 59e41d3b22a01cde02a88eca51a000e6a552cfe80ebfb52c7f85bb41deef7467 Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.112480 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc4541ce-7789-4670-bc75-5c2868e52ce0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-dgvkt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.116423 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.121372 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qx4gj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d655d34c-2969-43f2-8e93-455507c7cfda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lldsn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:49b34ce0d25eec7a6077f4bf21bf7d4e64e598d28785a20b9ee3594423b7de14\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lldsn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qx4gj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125579 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-systemd\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125641 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125680 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-script-lib\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125698 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-systemd\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125715 5113 reconciler_common.go:224] 
"operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-mcd-auth-proxy-config\") pod \"machine-config-daemon-gxph5\" (UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125763 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125775 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-ovn-kubernetes\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125829 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-config\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125856 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2hczl\" (UniqueName: \"kubernetes.io/projected/6740364c-f52c-49d7-9841-823aa6f3894b-kube-api-access-2hczl\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125903 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-netns\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125925 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-etc-openvswitch\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125963 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f4c38be5-b405-4caf-9ae9-e93c7ca572b1-serviceca\") pod \"node-ca-4q767\" (UID: \"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\") " pod="openshift-image-registry/node-ca-4q767" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125987 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-log-socket\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126006 5113 reconciler_common.go:224] 
"operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-bin\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126045 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-9b8kv\" (UniqueName: \"kubernetes.io/projected/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-kube-api-access-9b8kv\") pod \"machine-config-daemon-gxph5\" (UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126078 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-slash\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126116 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-netd\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126142 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/27d4d422-313b-48d2-b7ec-7e914beaac62-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126170 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-g6xzs\" (UniqueName: \"kubernetes.io/projected/27d4d422-313b-48d2-b7ec-7e914beaac62-kube-api-access-g6xzs\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126209 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gqw79\" (UniqueName: \"kubernetes.io/projected/f4c38be5-b405-4caf-9ae9-e93c7ca572b1-kube-api-access-gqw79\") pod \"node-ca-4q767\" (UID: \"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\") " pod="openshift-image-registry/node-ca-4q767" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126230 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-env-overrides\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126278 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-systemd-units\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" 
Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126300 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-var-lib-openvswitch\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126321 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-ovn\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126374 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6740364c-f52c-49d7-9841-823aa6f3894b-ovn-node-metrics-cert\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126411 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-kubelet\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126455 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-env-overrides\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126483 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-openvswitch\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126535 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-proxy-tls\") pod \"machine-config-daemon-gxph5\" (UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126563 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-node-log\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126605 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-rootfs\") pod \"machine-config-daemon-gxph5\" (UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126630 5113 
reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-ovnkube-config\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126681 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f4c38be5-b405-4caf-9ae9-e93c7ca572b1-host\") pod \"node-ca-4q767\" (UID: \"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\") " pod="openshift-image-registry/node-ca-4q767" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126784 5113 reconciler_common.go:299] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/18f80adb-c1c3-49ba-8ee4-932c851d3897-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126864 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-var-lib-openvswitch\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.126907 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-rootfs\") pod \"machine-config-daemon-gxph5\" (UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.125802 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-ovn-kubernetes\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127003 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-node-log\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127107 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-script-lib\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127124 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-ovn\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127279 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-kubelet\") pod \"ovnkube-node-724qr\" (UID: 
\"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127332 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-netns\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127445 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-mcd-auth-proxy-config\") pod \"machine-config-daemon-gxph5\" (UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127582 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127689 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/7afa918d-be67-40a6-803c-d3b0ae99d815-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127762 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-log-socket\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127780 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-config\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127617 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f4c38be5-b405-4caf-9ae9-e93c7ca572b1-host\") pod \"node-ca-4q767\" (UID: \"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\") " pod="openshift-image-registry/node-ca-4q767" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127776 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127975 5113 reconciler_common.go:299] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9e9b5059-1b3e-4067-a63d-2952cbe863af-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128066 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-bin\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128076 5113 reconciler_common.go:299] "Volume detached for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5f2bfad-70f6-4185-a3d9-81ce12720767-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128117 5113 reconciler_common.go:299] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a7a88189-c967-4640-879e-27665747f20c-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128131 5113 reconciler_common.go:299] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128146 5113 reconciler_common.go:299] "Volume detached for volume \"whereabouts-flatfile-configmap\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-whereabouts-flatfile-configmap\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128159 5113 reconciler_common.go:299] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-service-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128174 5113 reconciler_common.go:299] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128187 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128200 5113 reconciler_common.go:299] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d565531a-ff86-4608-9d19-767de01ac31b-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128213 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-6rmnv\" (UniqueName: \"kubernetes.io/projected/b605f283-6f2e-42da-a838-54421690f7d0-kube-api-access-6rmnv\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128226 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7599e0b6-bddf-4def-b7f2-0b32206e8651-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128240 5113 reconciler_common.go:299] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/593a3561-7760-45c5-8f91-5aaef7475d0f-certs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128252 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zg8nc\" (UniqueName: \"kubernetes.io/projected/2325ffef-9d5b-447f-b00e-3efc429acefe-kube-api-access-zg8nc\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128264 5113 reconciler_common.go:299] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/af33e427-6803-48c2-a76a-dd9deb7cbf9a-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128277 5113 reconciler_common.go:299] "Volume detached for volume \"ovn-node-metrics-cert\" 
(UniqueName: \"kubernetes.io/secret/af33e427-6803-48c2-a76a-dd9deb7cbf9a-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128288 5113 reconciler_common.go:299] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128299 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-m26jq\" (UniqueName: \"kubernetes.io/projected/567683bd-0efc-4f21-b076-e28559628404-kube-api-access-m26jq\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128312 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-grwfz\" (UniqueName: \"kubernetes.io/projected/31fa8943-81cc-4750-a0b7-0fa9ab5af883-kube-api-access-grwfz\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128325 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d7e8f42f-dc0e-424b-bb56-5ec849834888-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128337 5113 reconciler_common.go:299] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/92dfbade-90b6-4169-8c07-72cff7f2c82b-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128349 5113 reconciler_common.go:299] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b4750666-1362-4001-abd0-6f89964cc621-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128361 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128373 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128385 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zsb9b\" (UniqueName: \"kubernetes.io/projected/09cfa50b-4138-4585-a53e-64dd3ab73335-kube-api-access-zsb9b\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128389 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-ovnkube-config\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.127723 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-etc-openvswitch\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128430 5113 reconciler_common.go:299] "Volume detached for volume \"serviceca\" 
(UniqueName: \"kubernetes.io/configmap/5ebfebf6-3ecd-458e-943f-bb25b52e2718-serviceca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128456 5113 reconciler_common.go:299] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6077b63e-53a2-4f96-9d56-1ce0324e4913-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128459 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-openvswitch\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128468 5113 reconciler_common.go:299] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9f71a554-e414-4bc3-96d2-674060397afe-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128495 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-systemd-units\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128503 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128543 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128557 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/149b3c48-e17c-4a66-a835-d86dabf6ff13-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128568 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-netd\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128584 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-env-overrides\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128590 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-l9stx\" (UniqueName: \"kubernetes.io/projected/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-kube-api-access-l9stx\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128646 5113 reconciler_common.go:299] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/81e39f7b-62e4-4fc9-992a-6535ce127a02-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: 
I0130 00:11:21.128666 5113 reconciler_common.go:299] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128685 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-wbmqg\" (UniqueName: \"kubernetes.io/projected/18f80adb-c1c3-49ba-8ee4-932c851d3897-kube-api-access-wbmqg\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128703 5113 reconciler_common.go:299] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128040 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-slash\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128720 5113 reconciler_common.go:299] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9e9b5059-1b3e-4067-a63d-2952cbe863af-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128739 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-5lcfw\" (UniqueName: \"kubernetes.io/projected/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-kube-api-access-5lcfw\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128757 5113 reconciler_common.go:299] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128774 5113 reconciler_common.go:299] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128792 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-tkdh6\" (UniqueName: \"kubernetes.io/projected/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-kube-api-access-tkdh6\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128809 5113 reconciler_common.go:299] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a7a88189-c967-4640-879e-27665747f20c-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128827 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128846 5113 reconciler_common.go:299] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/e1d2a42d-af1d-4054-9618-ab545e0ed8b7-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128864 5113 reconciler_common.go:299] "Volume detached for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128882 5113 reconciler_common.go:299] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128899 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-xnxbn\" (UniqueName: \"kubernetes.io/projected/ce090a97-9ab6-4c40-a719-64ff2acd9778-kube-api-access-xnxbn\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128917 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-xfp5s\" (UniqueName: \"kubernetes.io/projected/cc85e424-18b2-4924-920b-bd291a8c4b01-kube-api-access-xfp5s\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128934 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128952 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-twvbl\" (UniqueName: \"kubernetes.io/projected/b4750666-1362-4001-abd0-6f89964cc621-kube-api-access-twvbl\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128971 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c5f2bfad-70f6-4185-a3d9-81ce12720767-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.128988 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a555ff2e-0be6-46d5-897d-863bb92ae2b3-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129006 5113 reconciler_common.go:299] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/869851b9-7ffb-4af0-b166-1d8aa40a5f80-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129024 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129043 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-mjwtd\" (UniqueName: \"kubernetes.io/projected/869851b9-7ffb-4af0-b166-1d8aa40a5f80-kube-api-access-mjwtd\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129061 5113 reconciler_common.go:299] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129078 5113 reconciler_common.go:299] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129097 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-99zj9\" (UniqueName: 
\"kubernetes.io/projected/d565531a-ff86-4608-9d19-767de01ac31b-kube-api-access-99zj9\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129115 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129133 5113 reconciler_common.go:299] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9e9b5059-1b3e-4067-a63d-2952cbe863af-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129151 5113 reconciler_common.go:299] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/d19cb085-0c5b-4810-b654-ce7923221d90-audit\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129168 5113 reconciler_common.go:299] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f559dfa3-3917-43a2-97f6-61ddfda10e93-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129184 5113 reconciler_common.go:299] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9f71a554-e414-4bc3-96d2-674060397afe-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129202 5113 reconciler_common.go:299] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/16bdd140-dce1-464c-ab47-dd5798d1d256-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129219 5113 reconciler_common.go:299] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca-console-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129236 5113 reconciler_common.go:299] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129252 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a555ff2e-0be6-46d5-897d-863bb92ae2b3-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129269 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94a6e063-3d1a-4d44-875d-185291448c31-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129286 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8pskd\" (UniqueName: \"kubernetes.io/projected/a555ff2e-0be6-46d5-897d-863bb92ae2b3-kube-api-access-8pskd\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129305 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-d7cps\" (UniqueName: \"kubernetes.io/projected/af41de71-79cf-4590-bbe9-9e8b848862cb-kube-api-access-d7cps\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129324 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/09cfa50b-4138-4585-a53e-64dd3ab73335-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129341 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-mfzkj\" (UniqueName: \"kubernetes.io/projected/0effdbcf-dd7d-404d-9d48-77536d665a5d-kube-api-access-mfzkj\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129358 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d19cb085-0c5b-4810-b654-ce7923221d90-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129375 5113 reconciler_common.go:299] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/18f80adb-c1c3-49ba-8ee4-932c851d3897-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129392 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/6edfcf45-925b-4eff-b940-95b6fc0b85d4-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129411 5113 reconciler_common.go:299] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b4750666-1362-4001-abd0-6f89964cc621-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129430 5113 reconciler_common.go:299] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ce090a97-9ab6-4c40-a719-64ff2acd9778-signing-key\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129447 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-z5rsr\" (UniqueName: \"kubernetes.io/projected/af33e427-6803-48c2-a76a-dd9deb7cbf9a-kube-api-access-z5rsr\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129466 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129483 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f65c0ac1-8bca-454d-a2e6-e35cb418beac-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129500 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-26xrl\" (UniqueName: \"kubernetes.io/projected/a208c9c2-333b-4b4a-be0d-bc32ec38a821-kube-api-access-26xrl\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129523 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31fa8943-81cc-4750-a0b7-0fa9ab5af883-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129578 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f4c38be5-b405-4caf-9ae9-e93c7ca572b1-serviceca\") pod \"node-ca-4q767\" (UID: \"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\") " pod="openshift-image-registry/node-ca-4q767" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129580 5113 reconciler_common.go:299] "Volume detached for volume 
\"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129627 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-hckvg\" (UniqueName: \"kubernetes.io/projected/fc8db2c7-859d-47b3-a900-2bd0c0b2973b-kube-api-access-hckvg\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129639 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cc85e424-18b2-4924-920b-bd291a8c4b01-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129650 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-6dmhf\" (UniqueName: \"kubernetes.io/projected/736c54fe-349c-4bb9-870a-d1c1d1c03831-kube-api-access-6dmhf\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.129648 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-env-overrides\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.130668 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-proxy-tls\") pod \"machine-config-daemon-gxph5\" (UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.131568 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6740364c-f52c-49d7-9841-823aa6f3894b-ovn-node-metrics-cert\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: W0130 00:11:21.132197 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ab4dd28_7902_4bc5_959f_47cd8cebb0c1.slice/crio-cee1c94a663e788fca513037f21bdb1ecb66ae39316014604ca6bc1f6ff3a516 WatchSource:0}: Error finding container cee1c94a663e788fca513037f21bdb1ecb66ae39316014604ca6bc1f6ff3a516: Status 404 returned error can't find the container with id cee1c94a663e788fca513037f21bdb1ecb66ae39316014604ca6bc1f6ff3a516 Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.139241 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6740364c-f52c-49d7-9841-823aa6f3894b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b
21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-724qr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.143337 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/27d4d422-313b-48d2-b7ec-7e914beaac62-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.147149 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.147389 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.147406 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.147432 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.147449 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:21Z","lastTransitionTime":"2026-01-30T00:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.147639 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqw79\" (UniqueName: \"kubernetes.io/projected/f4c38be5-b405-4caf-9ae9-e93c7ca572b1-kube-api-access-gqw79\") pod \"node-ca-4q767\" (UID: \"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\") " pod="openshift-image-registry/node-ca-4q767" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.150763 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b67074d5-281e-468b-aa37-ddb912a9c264\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://c9513f6490ba61188cc5ec698f270d0e1c17f4e1a9c4bdda59e9e18665a7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://556c149aaf3abcc7711392b257ff4f10359d4d9e8ee8b8b3383a970de7a28303\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o:
//556c149aaf3abcc7711392b257ff4f10359d4d9e8ee8b8b3383a970de7a28303\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.151348 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hczl\" (UniqueName: \"kubernetes.io/projected/6740364c-f52c-49d7-9841-823aa6f3894b-kube-api-access-2hczl\") pod \"ovnkube-node-724qr\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.152077 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6xzs\" (UniqueName: \"kubernetes.io/projected/27d4d422-313b-48d2-b7ec-7e914beaac62-kube-api-access-g6xzs\") pod \"ovnkube-control-plane-57b78d8988-trlrx\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.152570 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-9b8kv\" (UniqueName: \"kubernetes.io/projected/dccb6bc1-d2db-4bf2-a0db-1c84219d0499-kube-api-access-9b8kv\") pod \"machine-config-daemon-gxph5\" (UID: \"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\") " pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.152735 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-67v4x" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.160818 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17b87002-b798-480a-8e17-83053d698239\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gwt8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-fhkjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.168045 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-4q767" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.170212 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.171148 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fbdfe828b092b23e6d4480daf3e0216aada6debaf1ef1b314a0a31e73ebf13c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-5ff7774fd9-nljh6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.179375 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4q767" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqw79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4q767\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.192443 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" event={"ID":"34177974-8d82-49d2-a763-391d0df3bbd8","Type":"ContainerStarted","Data":"609588889c2d405f5620467c869ae00ccd7477a3a74da58043a03236c614379c"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.194264 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.195139 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" event={"ID":"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1","Type":"ContainerStarted","Data":"cee1c94a663e788fca513037f21bdb1ecb66ae39316014604ca6bc1f6ff3a516"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.196702 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mbd62" event={"ID":"8ad95d7b-7c01-4672-8614-0cc8e52c0d79","Type":"ContainerStarted","Data":"59e41d3b22a01cde02a88eca51a000e6a552cfe80ebfb52c7f85bb41deef7467"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.197713 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" event={"ID":"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc","Type":"ContainerStarted","Data":"14ce1178f1ce3dc7b36112bec1ebbe43be9425c98bb9f23a48a7823fa15c28f3"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.198411 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" event={"ID":"fc4541ce-7789-4670-bc75-5c2868e52ce0","Type":"ContainerStarted","Data":"400bd8781e19d3b53cb1e6730a949d572e81747d90de8769e9a9c983c2dc143b"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.210472 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:11:21 crc kubenswrapper[5113]: W0130 00:11:21.211218 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4c38be5_b405_4caf_9ae9_e93c7ca572b1.slice/crio-be34459a72c00ce6b23af9bc431fca61961e8414ccb541f72deaec6fae7368c3 WatchSource:0}: Error finding container be34459a72c00ce6b23af9bc431fca61961e8414ccb541f72deaec6fae7368c3: Status 404 returned error can't find the container with id be34459a72c00ce6b23af9bc431fca61961e8414ccb541f72deaec6fae7368c3 Jan 30 00:11:21 crc kubenswrapper[5113]: W0130 00:11:21.241236 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6740364c_f52c_49d7_9841_823aa6f3894b.slice/crio-d10209b0c9210bbafea1e5f0426d9e2ccd7ec64ba9f477fdd8b59dbc8f4a1ae1 WatchSource:0}: Error finding container d10209b0c9210bbafea1e5f0426d9e2ccd7ec64ba9f477fdd8b59dbc8f4a1ae1: Status 404 returned error can't find the container with id d10209b0c9210bbafea1e5f0426d9e2ccd7ec64ba9f477fdd8b59dbc8f4a1ae1 Jan 30 00:11:21 crc kubenswrapper[5113]: W0130 00:11:21.243819 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddccb6bc1_d2db_4bf2_a0db_1c84219d0499.slice/crio-46aadb56d7907f5306a0ca920002355b128d12f29606d253f30aed43661aeb2e WatchSource:0}: Error finding container 46aadb56d7907f5306a0ca920002355b128d12f29606d253f30aed43661aeb2e: Status 404 returned error can't find the container with id 46aadb56d7907f5306a0ca920002355b128d12f29606d253f30aed43661aeb2e Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.249861 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.250053 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.250067 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.250083 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.250093 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:21Z","lastTransitionTime":"2026-01-30T00:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:21 crc kubenswrapper[5113]: W0130 00:11:21.261382 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27d4d422_313b_48d2_b7ec_7e914beaac62.slice/crio-97f0c491d44b4d346a618e810d275e9d2af83e61b47df67417a17065120b856e WatchSource:0}: Error finding container 97f0c491d44b4d346a618e810d275e9d2af83e61b47df67417a17065120b856e: Status 404 returned error can't find the container with id 97f0c491d44b4d346a618e810d275e9d2af83e61b47df67417a17065120b856e Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.352280 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.352329 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.352343 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.352360 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.352373 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:21Z","lastTransitionTime":"2026-01-30T00:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.432074 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.432116 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.432147 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.432164 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432273 5113 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432322 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:22.432309362 +0000 UTC m=+102.504914739 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432684 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432699 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432708 5113 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432733 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:22.432725995 +0000 UTC m=+102.505331372 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432769 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432776 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432783 5113 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432801 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75 podName:f863fff9-286a-45fa-b8f0-8a86994b8440 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:22.432795477 +0000 UTC m=+102.505400854 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432824 5113 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.432872 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:22.43286522 +0000 UTC m=+102.505470587 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.456482 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.456534 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.456546 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.456579 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.456593 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:21Z","lastTransitionTime":"2026-01-30T00:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.533553 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.533687 5113 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.533750 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs podName:d655d34c-2969-43f2-8e93-455507c7cfda nodeName:}" failed. No retries permitted until 2026-01-30 00:11:22.533733667 +0000 UTC m=+102.606339044 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs") pod "network-metrics-daemon-qx4gj" (UID: "d655d34c-2969-43f2-8e93-455507c7cfda") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.559506 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.559584 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.559598 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.559616 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.559630 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:21Z","lastTransitionTime":"2026-01-30T00:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.634106 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:21 crc kubenswrapper[5113]: E0130 00:11:21.634299 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:22.634282664 +0000 UTC m=+102.706888041 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.662085 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.662136 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.662154 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.662177 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.662224 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:21Z","lastTransitionTime":"2026-01-30T00:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.764365 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.764401 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.764412 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.764427 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.764438 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:21Z","lastTransitionTime":"2026-01-30T00:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.867181 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.867252 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.867264 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.867286 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.867303 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:21Z","lastTransitionTime":"2026-01-30T00:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.969996 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.970048 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.970061 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.970080 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:21 crc kubenswrapper[5113]: I0130 00:11:21.970095 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:21Z","lastTransitionTime":"2026-01-30T00:11:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.072915 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.073231 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.073241 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.073256 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.073267 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:22Z","lastTransitionTime":"2026-01-30T00:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.175292 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.175337 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.175347 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.175365 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.175377 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:22Z","lastTransitionTime":"2026-01-30T00:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.203196 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" event={"ID":"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1","Type":"ContainerStarted","Data":"d8adf7ce71434475f988829f92a7ad5704510610213d83a0c77f27042a6ef652"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.204697 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mbd62" event={"ID":"8ad95d7b-7c01-4672-8614-0cc8e52c0d79","Type":"ContainerStarted","Data":"de200dfacd4aa9bc81f1ab585e923e29b0513c4c2d438d2206ba2325a4b9faab"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.206023 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-4q767" event={"ID":"f4c38be5-b405-4caf-9ae9-e93c7ca572b1","Type":"ContainerStarted","Data":"f9a1848302991a4b4500fe1494239b9d9e49176ef181fde02271c142ce4e5784"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.206055 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-4q767" event={"ID":"f4c38be5-b405-4caf-9ae9-e93c7ca572b1","Type":"ContainerStarted","Data":"be34459a72c00ce6b23af9bc431fca61961e8414ccb541f72deaec6fae7368c3"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.208077 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" event={"ID":"27d4d422-313b-48d2-b7ec-7e914beaac62","Type":"ContainerStarted","Data":"ed1c2109516e4d4faa67a368e46ac6d8d5c3984826eac23b06abb147d7d05bd4"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.208141 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" event={"ID":"27d4d422-313b-48d2-b7ec-7e914beaac62","Type":"ContainerStarted","Data":"84e0d23e9d709e289a86f7f346beec9de5fb61424d6a78b2237daae057d88a5d"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.208156 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" event={"ID":"27d4d422-313b-48d2-b7ec-7e914beaac62","Type":"ContainerStarted","Data":"97f0c491d44b4d346a618e810d275e9d2af83e61b47df67417a17065120b856e"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 
00:11:22.215812 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerStarted","Data":"b38b1bd48dcebdf77c2b33c150c85d82cae0c6a5e437687b27926a2727d3b309"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.215873 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerStarted","Data":"3448c37a3f80491c2d3cfa4d86f18abd9731d5d8a7722c07abbbacc4c6189249"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.215885 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerStarted","Data":"46aadb56d7907f5306a0ca920002355b128d12f29606d253f30aed43661aeb2e"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.223910 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6255149e-f462-4c80-a8d7-fcdd2ce199cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://1f0821e94ddb2e6bf615e3accd0ec0c094ad7318840fb733498457386fa12672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://d756a0c45f137a1c35d97de642058fe0719246aa403da963da642b3575e4a7c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://5aca51a566a8bfc282a87f7f7c29ccaa92469aca9b326106d1e177dcb6a0b159\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://1674b7d60c4a07220d2988766c24f8b6c7835f7d1736041dc4cdf00f7a96e9e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1674b7d60c4a07220d2988766c24f8b6c7835f7d1736041dc4cdf00f7a96e9e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 
00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.224147 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" event={"ID":"fc4541ce-7789-4670-bc75-5c2868e52ce0","Type":"ContainerStarted","Data":"43e0a5818727042a6536ac89095a9d971b014d48f18348a7debe6a98a1f6bab3"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.224272 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" event={"ID":"fc4541ce-7789-4670-bc75-5c2868e52ce0","Type":"ContainerStarted","Data":"33775ba8bd59597b2051ff8c466e1e96e3278bc09e493b3a1f8df77e8152d30a"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.226437 5113 generic.go:358] "Generic (PLEG): container finished" podID="6740364c-f52c-49d7-9841-823aa6f3894b" containerID="3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7" exitCode=0 Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.226509 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerDied","Data":"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.226550 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerStarted","Data":"d10209b0c9210bbafea1e5f0426d9e2ccd7ec64ba9f477fdd8b59dbc8f4a1ae1"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.236680 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" event={"ID":"34177974-8d82-49d2-a763-391d0df3bbd8","Type":"ContainerStarted","Data":"504696abe644e4166691626be1f4e2ed2904a60f4844ef61650021fcf80df7a9"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.239412 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-67v4x" event={"ID":"d9239213-5213-4f95-9acf-9d99c18c3f5a","Type":"ContainerStarted","Data":"723d668d164a69b366a41aec453c550a6159e7471e819996b953e1650d928dc1"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.239475 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-67v4x" event={"ID":"d9239213-5213-4f95-9acf-9d99c18c3f5a","Type":"ContainerStarted","Data":"9d27c33dba1227e647c82e26627bb425f491627967ea5d7b7900247a5f59acda"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.248426 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7641c4c3-6ba9-47e9-9fd9-cf5a8e3705af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"},\\\"containerID\\\":\\\"cri-o://18179ab63cf80a7f758b7fd5824271423f89324275d958eb798af84b0b460a97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://895ff1a1765310a6aee57eb2705b412bd05dfe3974bd21a972125242e01f91a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://ab0ebe5822f8252d91adf6695fa0650c95e18f3a4a36ba72dfb2277b4ab1778c\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://3802970e9c97e6b99feaf7ca3cfd1f6939675398a66b6034ad9d323eae1838ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:46Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://c84daac274d8f9e1ac29f34d4d1dbecebb0dea078366aa37a4cbbc588a678232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://f6f2151f8ca9d295331aa70c7cb364785d177a2
cabe410797748bb8b3f2d294e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6f2151f8ca9d295331aa70c7cb364785d177a2cabe410797748bb8b3f2d294e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd-auto-backup\\\",\\\"name\\\":\\\"etcd-auto-backup-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://3c5720519c1757ed7c8bf0c9f56cb990b9f450f7c9c6bc1fd4961f8851f2cd14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c5720519c1757ed7c8bf0c9f56cb990b9f450f7c9c6bc1fd4961f8851f2cd14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://65e9cdf813a17a4871a7677b2b0d236147c4db43f9b87a38b60c0795d93a5207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65e9cdf813a17a4871a7677b2b0d236147c4db43f9b87a38b60c0795d93a5207\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:44Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.262746 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df6f4867-b098-485b-81b7-844ef832d471\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"},\\\"containerID\\\":\\\"cri-o://f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-bundle-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf
9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T00:10:51Z\\\",\\\"message\\\":\\\" 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 00:10:51.157125 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 00:10:51.157152 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:10:51.157158 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:10:51.157164 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 00:10:51.157168 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 00:10:51.157171 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 00:10:51.157175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0130 00:10:51.157255 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0130 
00:10:51.160382 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController\\\\nI0130 00:10:51.160422 1 shared_informer.go:350] \\\\\\\"Waiting for caches to sync\\\\\\\" controller=\\\\\\\"RequestHeaderAuthRequestController\\\\\\\"\\\\nI0130 00:10:51.160477 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI0130 00:10:51.160476 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0130 00:10:51.160492 1 shared_informer.go:350] \\\\\\\"Waiting for caches to sync\\\\\\\" controller=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI0130 00:10:51.160597 1 shared_informer.go:350] \\\\\\\"Waiting for caches to sync\\\\\\\" controller=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nF0130 00:10:51.160637 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T00:10:50Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:44Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.272789 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-mbd62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2f6lr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mbd62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.278170 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.278212 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.278251 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.278268 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.278280 5113 setters.go:618] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:22Z","lastTransitionTime":"2026-01-30T00:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.287084 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8adf7ce71434475f988829f92a7ad5704510610213d83a0c77f27042a6ef652\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"resources\\\":{},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:21Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6efa070ceb93cc5fc2e76eab6d9c96ac3c4f8812085d0b6eb6e3f513b5bac782\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3454e762466e22e2a893650b9781823558bc6fdfda2aa4188aff3cb819014c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/etc/whereabouts/config\\\",\\\"name\\\":\\\"whereabouts-flatfile-configmap\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-t4r5k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.296988 5113 status_manager.go:919] "Failed to update 
status for pod" pod="openshift-dns/node-resolver-67v4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9239213-5213-4f95-9acf-9d99c18c3f5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g27c2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-67v4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.306569 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9b8kv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9b8kv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gxph5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.323747 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4c6c3b9f-83fe-4921-bed0-c97e707ee433\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://fb5de81be22616fca47976a0d24ab6c6b330a5560704d7c9bd3f30816a6a53c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://6e418d2037fa46413cbb5c58dc73ecf2ecc6f110ebd3bfec9715e53ec0b6c855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://c01e6a834297ca754c755531034c6a4ded795a39c18f0f97cc0dc73214b2356a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha25
6:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://d32db858499f6acdd8dcfeec470facb33761e98b6974386bdf9e165a721026b8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.334865 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.345416 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f863fff9-286a-45fa-b8f0-8a86994b8440\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l7w75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5bb8f5cd97-xdvz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.356008 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc4541ce-7789-4670-bc75-5c2868e52ce0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-dgvkt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.363865 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qx4gj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d655d34c-2969-43f2-8e93-455507c7cfda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lldsn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:49b34ce0d25eec7a6077f4bf21bf7d4e64e598d28785a20b9ee3594423b7de14\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lldsn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qx4gj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.377458 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6740364c-f52c-49d7-9841-823aa6f3894b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-724qr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.382100 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.382154 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.382166 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.382194 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.382210 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:22Z","lastTransitionTime":"2026-01-30T00:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.386937 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b67074d5-281e-468b-aa37-ddb912a9c264\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://c9513f6490ba61188cc5ec698f270d0e1c17f4e1a9c4bdda59e9e18665a7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://556c149aaf3abcc7711392b257ff4f10359d4d9e8ee8b8b3383a970de7a28303\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8
b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://556c149aaf3abcc7711392b257ff4f10359d4d9e8ee8b8b3383a970de7a28303\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.398833 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17b87002-b798-480a-8e17-83053d698239\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gwt8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-fhkjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.410277 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fbdfe828b092b23e6d4480daf3e0216aada6debaf1ef1b314a0a31e73ebf13c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-5ff7774fd9-nljh6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.416811 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4q767" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqw79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4q767\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: 
connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.424005 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27d4d422-313b-48d2-b7ec-7e914beaac62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g6xzs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g6xzs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-57b78d8988-trlrx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 
00:11:22.433201 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dsgwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-5jnd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.443649 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.443792 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.443823 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod 
\"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.443879 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.443917 5113 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.443953 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.443977 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.443990 5113 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.444008 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:24.443987027 +0000 UTC m=+104.516592414 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.444011 5113 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.444034 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:24.444021318 +0000 UTC m=+104.516626695 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.444055 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:24.444045969 +0000 UTC m=+104.516651346 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.444079 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.444121 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.444138 5113 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.444219 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75 podName:f863fff9-286a-45fa-b8f0-8a86994b8440 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:24.444197933 +0000 UTC m=+104.516803310 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.445237 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fbdfe828b092b23e6d4480daf3e0216aada6debaf1ef1b314a0a31e73ebf13c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-5ff7774fd9-nljh6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.453554 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4q767" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4c38be5-b405-4caf-9ae9-e93c7ca572b1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"10Mi\\\"},\\\"containerID\\\":\\\"cri-o://f9a1848302991a4b4500fe1494239b9d9e49176ef181fde02271c142ce4e5784\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dcb03ccba25366bbdf74cbab6738e7ef1f97f62760886ec445a40cdf29b60418\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"10Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:21Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":1001}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gqw79\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startT
ime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4q767\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.463118 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"27d4d422-313b-48d2-b7ec-7e914beaac62\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"20Mi\\\"},\\\"containerID\\\":\\\"cri-o://84e0d23e9d709e289a86f7f346beec9de5fb61424d6a78b2237daae057d88a5d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"20Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:21Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g6xzs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"300Mi\\\"},\\\"containerID\\\":\\\"cri-o://ed1c2109516e4d4faa67a368e46ac6d8d5c3984826eac23b06abb147d7d05bd4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"300Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:22Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0
,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g6xzs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-57b78d8988-trlrx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.473167 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dsgwk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-5jnd7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.483685 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.483724 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.483733 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.483747 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.483758 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:22Z","lastTransitionTime":"2026-01-30T00:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.486764 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6255149e-f462-4c80-a8d7-fcdd2ce199cd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://1f0821e94ddb2e6bf615e3accd0ec0c094ad7318840fb733498457386fa12672\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://d756a0c45f137a1c35d97de642058fe0719246aa403da963da642b3575e4a7c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://5aca51a566a8bfc282a87f7f7c29ccaa92469aca9b326106d1e177
dcb6a0b159\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e504172345491d90bbbf1e7e45488e73073f4c6d7c2355245871051596fc85db\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://1674b7d60c4a07220d2988766c24f8b6c7835f7d1736041dc4cdf00f7a96e9e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"15m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1674b7d60c4a07220d2988766c24f8b6c7835f7d1736041dc4cdf00f7a96e9e2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.507480 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7641c4c3-6ba9-47e9-9fd9-cf5a8e3705af\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"},\\\"containerID\\\":\\\"cri-o://18179ab63cf80a7f758b7fd5824271423f89324275d958eb798af84b0b460a97\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"300m\\\",\\\"memory\\\":\\\"600Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://895ff1a1765310a6aee57eb2705b412bd05dfe3974bd21a972125242e01f91a5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"40m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://ab0ebe5822f8252d91adf6695fa0650c95e18f3a4a36ba72dfb2277b4ab1778c\\\",\\\"im
age\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://3802970e9c97e6b99feaf7ca3cfd1f6939675398a66b6034ad9d323eae1838ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf05b9b2ba66351a6c59f4259fb377f62237a00af3b4f0b95f64409e2f25770e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:46Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://c84daac274d8f9e1ac29f34d4d1dbecebb0dea078366aa37a4cbbc588a678232\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:45Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://f6f2151f8ca9d295331aa70c7cb364785d177a2
cabe410797748bb8b3f2d294e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6f2151f8ca9d295331aa70c7cb364785d177a2cabe410797748bb8b3f2d294e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd-auto-backup\\\",\\\"name\\\":\\\"etcd-auto-backup-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://3c5720519c1757ed7c8bf0c9f56cb990b9f450f7c9c6bc1fd4961f8851f2cd14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c5720519c1757ed7c8bf0c9f56cb990b9f450f7c9c6bc1fd4961f8851f2cd14\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"},\\\"containerID\\\":\\\"cri-o://65e9cdf813a17a4871a7677b2b0d236147c4db43f9b87a38b60c0795d93a5207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9e388ee2b3562b6267447cbcc4b95ca7a61bf361840d36a682480da671b83612\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"60Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://65e9cdf813a17a4871a7677b2b0d236147c4db43f9b87a38b60c0795d93a5207\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:44Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.521099 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"df6f4867-b098-485b-81b7-844ef832d471\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"},\\\"containerID\\\":\\\"cri-o://f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"265m\\\",\\\"memory\\\":\\\"1Gi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-bundle-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf
9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T00:10:51Z\\\",\\\"message\\\":\\\" 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 00:10:51.157125 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 00:10:51.157152 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:10:51.157158 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 00:10:51.157164 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 00:10:51.157168 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 00:10:51.157171 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 00:10:51.157175 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0130 00:10:51.157255 1 genericapiserver.go:546] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nI0130 
00:10:51.160382 1 requestheader_controller.go:180] Starting RequestHeaderAuthRequestController\\\\nI0130 00:10:51.160422 1 shared_informer.go:350] \\\\\\\"Waiting for caches to sync\\\\\\\" controller=\\\\\\\"RequestHeaderAuthRequestController\\\\\\\"\\\\nI0130 00:10:51.160477 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI0130 00:10:51.160476 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI0130 00:10:51.160492 1 shared_informer.go:350] \\\\\\\"Waiting for caches to sync\\\\\\\" controller=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI0130 00:10:51.160597 1 shared_informer.go:350] \\\\\\\"Waiting for caches to sync\\\\\\\" controller=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nF0130 00:10:51.160637 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T00:10:50Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(3a14caf222afb62aaabdc47808b6f944)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:44Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.534353 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-mbd62" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8ad95d7b-7c01-4672-8614-0cc8e52c0d79\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"65Mi\\\"},\\\"containerID\\\":\\\"cri-o://de200dfacd4aa9bc81f1ab585e923e29b0513c4c2d438d2206ba2325a4b9faab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"65Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:21Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPa
th\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2f6lr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"multus-mbd62\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.544826 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.544997 5113 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.545063 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs podName:d655d34c-2969-43f2-8e93-455507c7cfda nodeName:}" failed. No retries permitted until 2026-01-30 00:11:24.54504637 +0000 UTC m=+104.617651757 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs") pod "network-metrics-daemon-qx4gj" (UID: "d655d34c-2969-43f2-8e93-455507c7cfda") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.547026 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:85f1323d589d7af13b096b1f9b438b9dfe08f3fab37534e2780e6490a665bf05\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8adf7ce71434475f988829f92a7ad5704510610213d83a0c77f27042a6ef652\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5a2a7b3c2f1598189d8880e6aa15ab11a65b201f25012f77ba41e7487a60729a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"resources\\\":{},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:21Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b55c029f731ebbde3c5580eef98a588264f4d6a8ae667805c9521dd1ecf1d5d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6efa070ceb93cc5fc2e76eab6d9c96ac3c4f8812085d0b6eb6e3f513b5bac782\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3454e762466e22e2a893650b9781823558bc6fdfda2aa4188aff3cb819014c4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:be2edaed22535093bdb486afe5960ff4f3b0bd96f88dc1753b584cc28184a0b0\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/etc/whereabouts/config\\\",\\\"name\\\":\\\"whereabouts-flatfile-configmap\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-58vql\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-t4r5k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.557821 5113 status_manager.go:919] "Failed to update 
status for pod" pod="openshift-dns/node-resolver-67v4x" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d9239213-5213-4f95-9acf-9d99c18c3f5a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"21Mi\\\"},\\\"containerID\\\":\\\"cri-o://723d668d164a69b366a41aec453c550a6159e7471e819996b953e1650d928dc1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5c5d7468f6838b6a714482e62ea956659212f3415ec8f69989f75eb6d8744a6e\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"21Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:21Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-g27c2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-67v4x\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.570452 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"dccb6bc1-d2db-4bf2-a0db-1c84219d0499\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://b38b1bd48dcebdf77c2b33c150c85d82cae0c6a5e437687b27926a2727d3b309\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:22Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9b8kv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://3448c37a3f80491c2d3cfa4d86f18abd9731d5d8a7722c07abbbacc4c6189249\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9414357f9345a841e0565265700ecc6637f846c83bd5908dbb7b306432465115\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:21Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-9b8kv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\
\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-gxph5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.585866 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.585918 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.585928 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.585948 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.585957 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:22Z","lastTransitionTime":"2026-01-30T00:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.589360 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4c6c3b9f-83fe-4921-bed0-c97e707ee433\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:10:04Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://fb5de81be22616fca47976a0d24ab6c6b330a5560704d7c9bd3f30816a6a53c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c6a47106effd9e9a41131e2bf6c832b80cd77b3439334f760b35b0729f2fb00\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"},\\\"containerID\\\":\\\"cri-o://6e418d2037fa46413cbb5c58dc73ecf2ecc6f110ebd3bfec9715e53ec0b6c855\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8a46fa8feeea5d04fd602559027f8bacc97e12bbf8e33793dca08e812e1f8825\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"60m\\\",\\\"memory\\\":\\\"200Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://c01e6a834297ca754c755531034c6a4ded795a39c18f0f97cc0dc73214b2356a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://d32db858499f6acdd8dcfeec470facb33761e98b6974386bdf9e165a721026b8\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:f69b9cc9b9cfde726109a9e12b80a3eefa472d7e29159df0fbc7143c48983cd6\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/tmp\\\",\\\"name\\\":\\\"tmp-dir\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem\\\",\\\"name\\\":\\\"ca-trust-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/kubernetes\\\",\\\"name\\\":\\\"var-run-kubernetes\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.602599 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-operator/network-operator-7bdcf4f5bd-7fjxv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"34177974-8d82-49d2-a763-391d0df3bbd8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://504696abe644e4166691626be1f4e2ed2904a60f4844ef61650021fcf80df7a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:21Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-m7xz2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-7bdcf4f5bd-7fjxv\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.614676 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f863fff9-286a-45fa-b8f0-8a86994b8440\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l7w75\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-5bb8f5cd97-xdvz5\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.624119 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-dgvkt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fc4541ce-7789-4670-bc75-5c2868e52ce0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://43e0a5818727042a6536ac89095a9d971b014d48f18348a7debe6a98a1f6bab3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:21Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0,1000500000],\\\"uid\\\":1000500000}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://33775ba8bd59597b2051ff8c466e1e96e3278bc09e493b3a1f8df77e8152d30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"10m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:11:21Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0,1000500000],\\\"uid\\\":1000500000}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8nt2j\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-dgvkt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 
00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.631115 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qx4gj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d655d34c-2969-43f2-8e93-455507c7cfda\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lldsn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:49b34ce0d25eec7a6077f4bf21bf7d4e64e598d28785a20b9ee3594423b7de14\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lldsn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qx4gj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.645644 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") 
" Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.645808 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:24.645790383 +0000 UTC m=+104.718395760 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.648217 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6740364c-f52c-49d7-9841-823aa6f3894b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174629230f874ae7d9ceda909ef45aced0cc8b21537851a0aceca55b0685b122\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"resources\\\":{},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:11:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:11:21Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":0,\\\"supplementalGroups\\\":[0],\\\"uid\\\":0}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hczl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:11:20Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-724qr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.658139 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b67074d5-281e-468b-aa37-ddb912a9c264\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T00:09:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://c9513f6490ba61188cc5ec698f270d0e1c17f4e1a9c4bdda59e9e18665a7dd9e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"20m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T00:09:43Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"allocatedResources\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"},\\\"containerID\\\":\\\"cri-o://556c149aaf3abcc7711392b257ff4f10359d4d9e8ee8b8b3383a970de7a28303\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:16d5a229c172bde2f4238e8a88602fd6351d80b262f35484740a979d8b3567a5\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"resources\\\":{\\\"requests\\\":{\\\"cpu\\\":\\\"5m\\\",\\\"memory\\\":\\\"50Mi\\\"}},\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://556c149aaf3abcc7711392b257ff4f10359d4d9e8ee8b8b3383a970de7a28303\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T00:09:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T00:09:42Z\\\"}},\\\"user\\\":{\\\"linux\\\":{\\\"gid\\\":65534,\\\"supplementalGroups\\\":[65534],\\\"uid\\\":65534}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\
\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T00:09:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.670436 5113 status_manager.go:919] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"17b87002-b798-480a-8e17-83053d698239\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T00:11:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a8581a82ba5c8343a743aa302c4848249d8c32a9f2cd10fa68d89d835a1bdf8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":4,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gwt8b\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-fhkjl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.687508 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.687688 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.687758 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.687861 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.688095 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:22Z","lastTransitionTime":"2026-01-30T00:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.772483 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.772590 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.772796 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.772937 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.773033 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.773083 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.773350 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" Jan 30 00:11:22 crc kubenswrapper[5113]: E0130 00:11:22.772980 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qx4gj" podUID="d655d34c-2969-43f2-8e93-455507c7cfda" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.777679 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01080b46-74f1-4191-8755-5152a57b3b25" path="/var/lib/kubelet/pods/01080b46-74f1-4191-8755-5152a57b3b25/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.779832 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09cfa50b-4138-4585-a53e-64dd3ab73335" path="/var/lib/kubelet/pods/09cfa50b-4138-4585-a53e-64dd3ab73335/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.786636 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0dd0fbac-8c0d-4228-8faa-abbeedabf7db" path="/var/lib/kubelet/pods/0dd0fbac-8c0d-4228-8faa-abbeedabf7db/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.790436 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.790469 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.790482 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.790499 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.790509 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:22Z","lastTransitionTime":"2026-01-30T00:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.801057 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0effdbcf-dd7d-404d-9d48-77536d665a5d" path="/var/lib/kubelet/pods/0effdbcf-dd7d-404d-9d48-77536d665a5d/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.807630 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="149b3c48-e17c-4a66-a835-d86dabf6ff13" path="/var/lib/kubelet/pods/149b3c48-e17c-4a66-a835-d86dabf6ff13/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.811488 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16bdd140-dce1-464c-ab47-dd5798d1d256" path="/var/lib/kubelet/pods/16bdd140-dce1-464c-ab47-dd5798d1d256/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.812300 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18f80adb-c1c3-49ba-8ee4-932c851d3897" path="/var/lib/kubelet/pods/18f80adb-c1c3-49ba-8ee4-932c851d3897/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.813834 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20ce4d18-fe25-4696-ad7c-1bd2d6200a3e" path="/var/lib/kubelet/pods/20ce4d18-fe25-4696-ad7c-1bd2d6200a3e/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.814446 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2325ffef-9d5b-447f-b00e-3efc429acefe" path="/var/lib/kubelet/pods/2325ffef-9d5b-447f-b00e-3efc429acefe/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.817418 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="301e1965-1754-483d-b6cc-bfae7038bbca" path="/var/lib/kubelet/pods/301e1965-1754-483d-b6cc-bfae7038bbca/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.820942 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31fa8943-81cc-4750-a0b7-0fa9ab5af883" path="/var/lib/kubelet/pods/31fa8943-81cc-4750-a0b7-0fa9ab5af883/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.826129 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42a11a02-47e1-488f-b270-2679d3298b0e" path="/var/lib/kubelet/pods/42a11a02-47e1-488f-b270-2679d3298b0e/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.827776 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="567683bd-0efc-4f21-b076-e28559628404" path="/var/lib/kubelet/pods/567683bd-0efc-4f21-b076-e28559628404/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.832705 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="584e1f4a-8205-47d7-8efb-3afc6017c4c9" path="/var/lib/kubelet/pods/584e1f4a-8205-47d7-8efb-3afc6017c4c9/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.834002 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="593a3561-7760-45c5-8f91-5aaef7475d0f" path="/var/lib/kubelet/pods/593a3561-7760-45c5-8f91-5aaef7475d0f/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.837560 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ebfebf6-3ecd-458e-943f-bb25b52e2718" path="/var/lib/kubelet/pods/5ebfebf6-3ecd-458e-943f-bb25b52e2718/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.838651 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6077b63e-53a2-4f96-9d56-1ce0324e4913" path="/var/lib/kubelet/pods/6077b63e-53a2-4f96-9d56-1ce0324e4913/volumes" Jan 30 00:11:22 
crc kubenswrapper[5113]: I0130 00:11:22.843420 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca" path="/var/lib/kubelet/pods/6a81eec9-f29e-49a0-a15a-f2f5bd2d95ca/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.850108 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6edfcf45-925b-4eff-b940-95b6fc0b85d4" path="/var/lib/kubelet/pods/6edfcf45-925b-4eff-b940-95b6fc0b85d4/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.854082 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ee8fbd3-1f81-4666-96da-5afc70819f1a" path="/var/lib/kubelet/pods/6ee8fbd3-1f81-4666-96da-5afc70819f1a/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.856577 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a" path="/var/lib/kubelet/pods/71c8ffbe-59c6-4e7d-aa1a-bbd315b3414a/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.873733 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="736c54fe-349c-4bb9-870a-d1c1d1c03831" path="/var/lib/kubelet/pods/736c54fe-349c-4bb9-870a-d1c1d1c03831/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.880335 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7599e0b6-bddf-4def-b7f2-0b32206e8651" path="/var/lib/kubelet/pods/7599e0b6-bddf-4def-b7f2-0b32206e8651/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.883230 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7afa918d-be67-40a6-803c-d3b0ae99d815" path="/var/lib/kubelet/pods/7afa918d-be67-40a6-803c-d3b0ae99d815/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.885053 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7df94c10-441d-4386-93a6-6730fb7bcde0" path="/var/lib/kubelet/pods/7df94c10-441d-4386-93a6-6730fb7bcde0/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.889429 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7fcc6409-8a0f-44c3-89e7-5aecd7610f8a" path="/var/lib/kubelet/pods/7fcc6409-8a0f-44c3-89e7-5aecd7610f8a/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.893257 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.893295 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.893305 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.893320 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.893329 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:22Z","lastTransitionTime":"2026-01-30T00:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.907589 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81e39f7b-62e4-4fc9-992a-6535ce127a02" path="/var/lib/kubelet/pods/81e39f7b-62e4-4fc9-992a-6535ce127a02/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.909123 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="869851b9-7ffb-4af0-b166-1d8aa40a5f80" path="/var/lib/kubelet/pods/869851b9-7ffb-4af0-b166-1d8aa40a5f80/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.917436 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff" path="/var/lib/kubelet/pods/9276f8f5-2f24-48e1-ab6d-1aab0d8ec3ff/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.918507 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92dfbade-90b6-4169-8c07-72cff7f2c82b" path="/var/lib/kubelet/pods/92dfbade-90b6-4169-8c07-72cff7f2c82b/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.922065 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94a6e063-3d1a-4d44-875d-185291448c31" path="/var/lib/kubelet/pods/94a6e063-3d1a-4d44-875d-185291448c31/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.924050 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f71a554-e414-4bc3-96d2-674060397afe" path="/var/lib/kubelet/pods/9f71a554-e414-4bc3-96d2-674060397afe/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.952194 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a208c9c2-333b-4b4a-be0d-bc32ec38a821" path="/var/lib/kubelet/pods/a208c9c2-333b-4b4a-be0d-bc32ec38a821/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.963231 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a52afe44-fb37-46ed-a1f8-bf39727a3cbe" path="/var/lib/kubelet/pods/a52afe44-fb37-46ed-a1f8-bf39727a3cbe/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.964254 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a555ff2e-0be6-46d5-897d-863bb92ae2b3" path="/var/lib/kubelet/pods/a555ff2e-0be6-46d5-897d-863bb92ae2b3/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.965340 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7a88189-c967-4640-879e-27665747f20c" path="/var/lib/kubelet/pods/a7a88189-c967-4640-879e-27665747f20c/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.975355 5113 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="af33e427-6803-48c2-a76a-dd9deb7cbf9a" path="/var/lib/kubelet/pods/af33e427-6803-48c2-a76a-dd9deb7cbf9a/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.975566 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af33e427-6803-48c2-a76a-dd9deb7cbf9a" path="/var/lib/kubelet/pods/af33e427-6803-48c2-a76a-dd9deb7cbf9a/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.979455 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af41de71-79cf-4590-bbe9-9e8b848862cb" path="/var/lib/kubelet/pods/af41de71-79cf-4590-bbe9-9e8b848862cb/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.988054 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a" 
path="/var/lib/kubelet/pods/b05a4c1d-fa93-4d3d-b6e5-235473e1ae2a/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.991587 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4750666-1362-4001-abd0-6f89964cc621" path="/var/lib/kubelet/pods/b4750666-1362-4001-abd0-6f89964cc621/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.995402 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b605f283-6f2e-42da-a838-54421690f7d0" path="/var/lib/kubelet/pods/b605f283-6f2e-42da-a838-54421690f7d0/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.996168 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c491984c-7d4b-44aa-8c1e-d7974424fa47" path="/var/lib/kubelet/pods/c491984c-7d4b-44aa-8c1e-d7974424fa47/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.996820 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.996859 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.996869 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.996884 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.996895 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:22Z","lastTransitionTime":"2026-01-30T00:11:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.998234 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5f2bfad-70f6-4185-a3d9-81ce12720767" path="/var/lib/kubelet/pods/c5f2bfad-70f6-4185-a3d9-81ce12720767/volumes" Jan 30 00:11:22 crc kubenswrapper[5113]: I0130 00:11:22.999284 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc85e424-18b2-4924-920b-bd291a8c4b01" path="/var/lib/kubelet/pods/cc85e424-18b2-4924-920b-bd291a8c4b01/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.000450 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce090a97-9ab6-4c40-a719-64ff2acd9778" path="/var/lib/kubelet/pods/ce090a97-9ab6-4c40-a719-64ff2acd9778/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.005836 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d19cb085-0c5b-4810-b654-ce7923221d90" path="/var/lib/kubelet/pods/d19cb085-0c5b-4810-b654-ce7923221d90/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.011869 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d45be74c-0d98-4d18-90e4-f7ef1b6daaf7" path="/var/lib/kubelet/pods/d45be74c-0d98-4d18-90e4-f7ef1b6daaf7/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.018916 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d565531a-ff86-4608-9d19-767de01ac31b" path="/var/lib/kubelet/pods/d565531a-ff86-4608-9d19-767de01ac31b/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.020260 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7e8f42f-dc0e-424b-bb56-5ec849834888" path="/var/lib/kubelet/pods/d7e8f42f-dc0e-424b-bb56-5ec849834888/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.022195 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9" path="/var/lib/kubelet/pods/dcd10325-9ba5-4a3b-8e4a-e57e3bf210f9/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.026219 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e093be35-bb62-4843-b2e8-094545761610" path="/var/lib/kubelet/pods/e093be35-bb62-4843-b2e8-094545761610/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.028697 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1d2a42d-af1d-4054-9618-ab545e0ed8b7" path="/var/lib/kubelet/pods/e1d2a42d-af1d-4054-9618-ab545e0ed8b7/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.031130 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f559dfa3-3917-43a2-97f6-61ddfda10e93" path="/var/lib/kubelet/pods/f559dfa3-3917-43a2-97f6-61ddfda10e93/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.035675 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f65c0ac1-8bca-454d-a2e6-e35cb418beac" path="/var/lib/kubelet/pods/f65c0ac1-8bca-454d-a2e6-e35cb418beac/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.037204 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4" path="/var/lib/kubelet/pods/f7648cbb-48eb-4ba8-87ec-eb096b8fa1e4/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.038273 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7e2c886-118e-43bb-bef1-c78134de392b" path="/var/lib/kubelet/pods/f7e2c886-118e-43bb-bef1-c78134de392b/volumes" Jan 30 00:11:23 
crc kubenswrapper[5113]: I0130 00:11:23.041845 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc8db2c7-859d-47b3-a900-2bd0c0b2973b" path="/var/lib/kubelet/pods/fc8db2c7-859d-47b3-a900-2bd0c0b2973b/volumes" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.099418 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.099458 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.099469 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.099483 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.099492 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.201293 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.201336 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.201349 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.201373 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.201391 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.247257 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerStarted","Data":"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166"} Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.247300 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerStarted","Data":"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6"} Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.247313 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerStarted","Data":"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c"} Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.247323 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerStarted","Data":"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a"} Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.248721 5113 generic.go:358] "Generic (PLEG): container finished" podID="6ab4dd28-7902-4bc5-959f-47cd8cebb0c1" containerID="d8adf7ce71434475f988829f92a7ad5704510610213d83a0c77f27042a6ef652" exitCode=0 Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.248982 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" event={"ID":"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1","Type":"ContainerDied","Data":"d8adf7ce71434475f988829f92a7ad5704510610213d83a0c77f27042a6ef652"} Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.304088 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.304143 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.304472 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.304495 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.304508 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.323866 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=3.32384815 podStartE2EDuration="3.32384815s" podCreationTimestamp="2026-01-30 00:11:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:23.319427533 +0000 UTC m=+103.392032940" watchObservedRunningTime="2026-01-30 00:11:23.32384815 +0000 UTC m=+103.396453537" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.324368 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=4.324358026 podStartE2EDuration="4.324358026s" podCreationTimestamp="2026-01-30 00:11:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:23.290334588 +0000 UTC m=+103.362940015" watchObservedRunningTime="2026-01-30 00:11:23.324358026 +0000 UTC m=+103.396963423" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.407221 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.407277 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.407290 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.407331 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.407344 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.419754 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-mbd62" podStartSLOduration=81.419495235 podStartE2EDuration="1m21.419495235s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:23.418985039 +0000 UTC m=+103.491590406" watchObservedRunningTime="2026-01-30 00:11:23.419495235 +0000 UTC m=+103.492100612" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.452020 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-67v4x" podStartSLOduration=81.452003527 podStartE2EDuration="1m21.452003527s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:23.451504661 +0000 UTC m=+103.524110038" watchObservedRunningTime="2026-01-30 00:11:23.452003527 +0000 UTC m=+103.524608904" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.466187 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podStartSLOduration=81.466168848 podStartE2EDuration="1m21.466168848s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:23.465415654 +0000 UTC m=+103.538021041" watchObservedRunningTime="2026-01-30 00:11:23.466168848 +0000 UTC m=+103.538774225" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.481982 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=3.481966429 podStartE2EDuration="3.481966429s" podCreationTimestamp="2026-01-30 00:11:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:23.481272987 +0000 UTC m=+103.553878364" watchObservedRunningTime="2026-01-30 00:11:23.481966429 +0000 UTC m=+103.554571806" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.509763 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.509795 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.509804 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.509817 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.509826 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.584542 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=4.584496977 podStartE2EDuration="4.584496977s" podCreationTimestamp="2026-01-30 00:11:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:23.566044773 +0000 UTC m=+103.638650150" watchObservedRunningTime="2026-01-30 00:11:23.584496977 +0000 UTC m=+103.657102354" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.609661 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-4q767" podStartSLOduration=81.609644789 podStartE2EDuration="1m21.609644789s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:23.608862355 +0000 UTC m=+103.681467732" watchObservedRunningTime="2026-01-30 00:11:23.609644789 +0000 UTC m=+103.682250166" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.614314 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.614378 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.614392 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.614413 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.614426 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.626970 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" podStartSLOduration=80.626946527 podStartE2EDuration="1m20.626946527s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:23.62638522 +0000 UTC m=+103.698990597" watchObservedRunningTime="2026-01-30 00:11:23.626946527 +0000 UTC m=+103.699551924"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.717316 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.717404 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.717423 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.717448 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.717462 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.820238 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.820283 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.820294 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.820314 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.820327 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.923934 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.923984 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.924002 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.924024 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:23 crc kubenswrapper[5113]: I0130 00:11:23.924039 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:23Z","lastTransitionTime":"2026-01-30T00:11:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.026509 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.027268 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.027424 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.027638 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.027806 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.131511 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.131578 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.131593 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.131614 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.131628 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.234352 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.234399 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.234409 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.234431 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.234441 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.258972 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerStarted","Data":"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.259051 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerStarted","Data":"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.262263 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" event={"ID":"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1","Type":"ContainerStarted","Data":"891ecd206bd6477afee018fb8f3cb990a08b07a8e9859df55d7ad391b5b0e931"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.264693 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-5jnd7" event={"ID":"428b39f5-eb1c-4f65-b7a4-eeb6e84860cc","Type":"ContainerStarted","Data":"b9cca29bb4b13404cca3d8f60c116ffb029d1bb4f6cfb3aed636469a345b5a48"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.337119 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.337459 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.337646 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.337796 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.337932 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.468848 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.468909 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.468928 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.468955 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.468975 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.476982 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.477161 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.477268 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.477588 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.477326 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.477808 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.477346 5113 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.477407 5113 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.477725 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.478343 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.477896 5113 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.478085 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:28.478053767 +0000 UTC m=+108.550659174 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.478419 5113 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.478750 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:28.478728689 +0000 UTC m=+108.551334136 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.478885 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75 podName:f863fff9-286a-45fa-b8f0-8a86994b8440 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:28.478837712 +0000 UTC m=+108.551443139 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.478946 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:28.478925085 +0000 UTC m=+108.551530542 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.571450 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.571486 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.571495 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.571512 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.571521 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.578863 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj"
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.579080 5113 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.579149 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs podName:d655d34c-2969-43f2-8e93-455507c7cfda nodeName:}" failed. No retries permitted until 2026-01-30 00:11:28.579119721 +0000 UTC m=+108.651725098 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs") pod "network-metrics-daemon-qx4gj" (UID: "d655d34c-2969-43f2-8e93-455507c7cfda") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.674591 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.674662 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.674685 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.674715 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.674738 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.679984 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.680253 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:28.680223055 +0000 UTC m=+108.752828432 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.772740 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.772753 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.773041 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.773100 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239"
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.773049 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141"
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.773192 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.773647 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qx4gj"
Jan 30 00:11:24 crc kubenswrapper[5113]: E0130 00:11:24.774010 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qx4gj" podUID="d655d34c-2969-43f2-8e93-455507c7cfda"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.777681 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.778008 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.778302 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.778629 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.778878 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.882337 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.883170 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.883191 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.883215 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.883235 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.986657 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.986735 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.986761 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.986835 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:24 crc kubenswrapper[5113]: I0130 00:11:24.986859 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:24Z","lastTransitionTime":"2026-01-30T00:11:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.089407 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.089459 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.089472 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.089493 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.089506 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.192081 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.192124 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.192136 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.192152 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.192164 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.294354 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.294407 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.294422 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.294441 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.294453 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.397097 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.397148 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.397160 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.397180 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.397197 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.500057 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.500110 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.500122 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.500141 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.500153 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.602650 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.602702 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.602712 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.602729 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.602920 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.705294 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.705343 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.705356 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.705370 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.705381 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.807653 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.807703 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.807713 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.807727 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.807740 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.910454 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.910594 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.910623 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.910659 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:25 crc kubenswrapper[5113]: I0130 00:11:25.910683 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:25Z","lastTransitionTime":"2026-01-30T00:11:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.013152 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.013223 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.013243 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.013269 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.013287 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.116246 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.116290 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.116302 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.116320 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.116331 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.219185 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.219752 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.219779 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.219812 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.219845 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.276128 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerStarted","Data":"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb"}
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.278946 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" event={"ID":"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1","Type":"ContainerDied","Data":"891ecd206bd6477afee018fb8f3cb990a08b07a8e9859df55d7ad391b5b0e931"}
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.278900 5113 generic.go:358] "Generic (PLEG): container finished" podID="6ab4dd28-7902-4bc5-959f-47cd8cebb0c1" containerID="891ecd206bd6477afee018fb8f3cb990a08b07a8e9859df55d7ad391b5b0e931" exitCode=0
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.321975 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.322025 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.322035 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.322052 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.322064 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.424390 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.424466 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.424485 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.424512 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.526901 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.526940 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.526949 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.526965 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.526976 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.629443 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.629502 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.629515 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.629557 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.629570 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.732164 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.732247 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.732272 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.732304 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.732330 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.772280 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.772345 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.772497 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:26 crc kubenswrapper[5113]: E0130 00:11:26.772569 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" Jan 30 00:11:26 crc kubenswrapper[5113]: E0130 00:11:26.772725 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qx4gj" podUID="d655d34c-2969-43f2-8e93-455507c7cfda" Jan 30 00:11:26 crc kubenswrapper[5113]: E0130 00:11:26.772865 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.772936 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:26 crc kubenswrapper[5113]: E0130 00:11:26.773100 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.834997 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.835070 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.835089 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.835115 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.835139 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.937735 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.937789 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.937798 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.937812 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:26 crc kubenswrapper[5113]: I0130 00:11:26.937822 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:26Z","lastTransitionTime":"2026-01-30T00:11:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.040840 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.040906 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.040926 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.040950 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.040968 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.143439 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.143492 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.143507 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.143561 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.143575 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.249321 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.249412 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.249437 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.249474 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.249498 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.351702 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.351759 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.351777 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.351802 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 00:11:27 crc kubenswrapper[5113]: I0130 00:11:27.351824 5113 setters.go:618] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:27Z","lastTransitionTime":"2026-01-30T00:11:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.294248 5113 generic.go:358] "Generic (PLEG): container finished" podID="6ab4dd28-7902-4bc5-959f-47cd8cebb0c1" containerID="8a046376ef25fa5b76742149cf122cb364201fcceb222dae6538360a53c343c4" exitCode=0
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.294360 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" event={"ID":"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1","Type":"ContainerDied","Data":"8a046376ef25fa5b76742149cf122cb364201fcceb222dae6538360a53c343c4"}
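Every "Node became not ready" heartbeat above repeats one root cause: the kubelet finds no CNI configuration under /etc/kubernetes/cni/net.d/, so the Ready condition stays False until the network provider (OVN-Kubernetes and the Multus plugins, still initializing below) writes one. A minimal Go sketch of the check that error implies, illustrative only and not the kubelet's actual code; CNI's libcni loads files with .conf, .conflist, or .json extensions:

// cnicheck.go: does the CNI conf dir contain any loadable network config yet?
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		// Extensions accepted by CNI's libcni config loader.
		switch strings.ToLower(filepath.Ext(e.Name())) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	fmt.Println("NetworkReady:", ok) // false reproduces the NotReady message above
}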
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.529637 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.530284 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.530338 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.530370 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.529968 5113 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.530614 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:36.530597656 +0000 UTC m=+116.603203033 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.530546 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.531316 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.531329 5113 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.531360 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75 podName:f863fff9-286a-45fa-b8f0-8a86994b8440 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:36.531351539 +0000 UTC m=+116.603956916 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.531412 5113 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.531547 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:36.531510864 +0000 UTC m=+116.604116281 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.531431 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.531605 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.531620 5113 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.531662 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:36.531653248 +0000 UTC m=+116.604258625 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
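The projected kube-api-access volumes above fail because the kube-root-ca.crt and openshift-service-ca.crt ConfigMaps are not yet registered in the kubelet's object cache; each failure is requeued with an 8s durationBeforeRetry. A hedged client-go sketch (the kubeconfig path is illustrative) that checks whether those two ConfigMaps exist in the affected namespace:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // illustrative path
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	ns := "openshift-network-diagnostics"
	// These are the two objects the projected token volume needs, per the errors above.
	for _, name := range []string{"kube-root-ca.crt", "openshift-service-ca.crt"} {
		_, err := cs.CoreV1().ConfigMaps(ns).Get(context.TODO(), name, metav1.GetOptions{})
		fmt.Printf("%s/%s present: %v (err=%v)\n", ns, name, err == nil, err)
	}
}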
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.631736 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj"
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.631879 5113 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.632137 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs podName:d655d34c-2969-43f2-8e93-455507c7cfda nodeName:}" failed. No retries permitted until 2026-01-30 00:11:36.632117473 +0000 UTC m=+116.704722860 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs") pod "network-metrics-daemon-qx4gj" (UID: "d655d34c-2969-43f2-8e93-455507c7cfda") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.733032 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.733267 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:36.733235528 +0000 UTC m=+116.805840925 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.772354 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.772496 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239"
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.772914 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.772987 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141"
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.773066 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qx4gj"
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.773143 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qx4gj" podUID="d655d34c-2969-43f2-8e93-455507c7cfda"
Jan 30 00:11:28 crc kubenswrapper[5113]: I0130 00:11:28.773502 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Jan 30 00:11:28 crc kubenswrapper[5113]: E0130 00:11:28.773773 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440"
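The TearDown failure above means kubevirt.io.hostpath-provisioner has not (re)registered with this kubelet; CSI drivers register through the node-driver-registrar sidecar once their plugin pod runs, and the registrations are reflected in the node's CSINode object. A hedged sketch that lists the registered drivers for the node named in the log (kubeconfig path illustrative):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // illustrative path
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	// CSINode mirrors the drivers the kubelet has accepted via plugin registration.
	csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	for _, d := range csiNode.Spec.Drivers {
		fmt.Println("registered CSI driver:", d.Name)
	}
}

Until kubevirt.io.hostpath-provisioner appears in that list, the unmount keeps requeueing with the same 8s backoff seen above.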
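The condition={...} payload in the "Node became not ready" entries above is plain JSON with the shape of a Kubernetes NodeCondition. A self-contained sketch decoding it (the message field is shortened here for brevity):

package main

import (
	"encoding/json"
	"fmt"
)

// nodeCondition mirrors the fields of the condition={} payload logged by
// setters.go above (a subset of corev1.NodeCondition).
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T00:11:29Z","lastTransitionTime":"2026-01-30T00:11:29Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s (%s)\n", c.Type, c.Status, c.Reason) // Ready=False (KubeletNotReady)
}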
Jan 30 00:11:29 crc kubenswrapper[5113]: I0130 00:11:29.307095 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerStarted","Data":"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847"}
Jan 30 00:11:29 crc kubenswrapper[5113]: I0130 00:11:29.307823 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:29 crc kubenswrapper[5113]: I0130 00:11:29.307910 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:29 crc kubenswrapper[5113]: I0130 00:11:29.307925 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:29 crc kubenswrapper[5113]: I0130 00:11:29.315414 5113 generic.go:358] "Generic (PLEG): container finished" podID="6ab4dd28-7902-4bc5-959f-47cd8cebb0c1" containerID="c779e6c7f98399477106c296c0cd12c022d30509ec9bac2ab1bc07373baa914a" exitCode=0
Jan 30 00:11:29 crc kubenswrapper[5113]: I0130 00:11:29.315465 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" event={"ID":"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1","Type":"ContainerDied","Data":"c779e6c7f98399477106c296c0cd12c022d30509ec9bac2ab1bc07373baa914a"}
Jan 30 00:11:29 crc kubenswrapper[5113]: I0130 00:11:29.354856 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" podStartSLOduration=87.35483621 podStartE2EDuration="1m27.35483621s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:29.353513779 +0000 UTC m=+109.426119156" watchObservedRunningTime="2026-01-30 00:11:29.35483621 +0000 UTC m=+109.427441587"
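podStartSLOduration above is just observedRunningTime minus podCreationTimestamp (the pull timestamps are the zero value because no image pull occurred): 00:11:29.35 minus 00:10:02 is roughly 87.35s. The same arithmetic in Go, using the timestamps from the entry above:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the pod_startup_latency_tracker entry above.
	created, _ := time.Parse("2006-01-02 15:04:05 -0700 MST", "2026-01-30 00:10:02 +0000 UTC")
	running, _ := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", "2026-01-30 00:11:29.353513779 +0000 UTC")
	fmt.Println("podStartSLOduration:", running.Sub(created)) // ~1m27.35s
}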
Jan 30 00:11:29 crc kubenswrapper[5113]: I0130 00:11:29.448230 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
Jan 30 00:11:29 crc kubenswrapper[5113]: I0130 00:11:29.451371 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-724qr"
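The "SyncLoop (probe)" entries record kubelet readiness-probe results flipping ovnkube-node-724qr from not ready to ready once OVN comes up. For orientation, a generic readiness endpoint of the kind such probes poll; this illustrates the mechanism only and is not OVN-Kubernetes code:

package main

import (
	"log"
	"net/http"
	"sync/atomic"
)

func main() {
	var ready atomic.Bool // flipped to true once startup work completes

	http.HandleFunc("/readyz", func(w http.ResponseWriter, r *http.Request) {
		if ready.Load() {
			w.WriteHeader(http.StatusOK) // kubelet records status="ready"
			return
		}
		w.WriteHeader(http.StatusServiceUnavailable) // kubelet records status="not ready"
	})

	ready.Store(true) // in real code this happens after initialization finishes
	log.Fatal(http.ListenAndServe(":8080", nil))
}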
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.290843 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"]
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.337194 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" event={"ID":"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1","Type":"ContainerStarted","Data":"dd0628694ff5c2f83dee048e54c6350b0e560c2501455285ec052715a3ff248a"}
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.338154 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.342619 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-version\"/\"openshift-service-ca.crt\""
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.342670 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-version\"/\"kube-root-ca.crt\""
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.342754 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-version\"/\"cluster-version-operator-serving-cert\""
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.342668 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-version\"/\"default-dockercfg-hqpm5\""
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.457290 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e6808822-94ca-4614-a36b-9f864908f6cb-service-ca\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.458252 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6808822-94ca-4614-a36b-9f864908f6cb-serving-cert\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.458290 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e6808822-94ca-4614-a36b-9f864908f6cb-kube-api-access\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.458332 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/e6808822-94ca-4614-a36b-9f864908f6cb-etc-ssl-certs\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.458367 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/e6808822-94ca-4614-a36b-9f864908f6cb-etc-cvo-updatepayloads\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.566583 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e6808822-94ca-4614-a36b-9f864908f6cb-kube-api-access\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.566648 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/e6808822-94ca-4614-a36b-9f864908f6cb-etc-ssl-certs\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.566670 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/e6808822-94ca-4614-a36b-9f864908f6cb-etc-cvo-updatepayloads\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.566698 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e6808822-94ca-4614-a36b-9f864908f6cb-service-ca\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.566789 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6808822-94ca-4614-a36b-9f864908f6cb-serving-cert\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.567278 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/e6808822-94ca-4614-a36b-9f864908f6cb-etc-ssl-certs\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.567756 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/e6808822-94ca-4614-a36b-9f864908f6cb-etc-cvo-updatepayloads\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.569806 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/e6808822-94ca-4614-a36b-9f864908f6cb-service-ca\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.583285 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e6808822-94ca-4614-a36b-9f864908f6cb-serving-cert\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.590732 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e6808822-94ca-4614-a36b-9f864908f6cb-kube-api-access\") pod \"cluster-version-operator-7c9b9cfd6-l6d56\" (UID: \"e6808822-94ca-4614-a36b-9f864908f6cb\") " pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.661851 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56"
Jan 30 00:11:30 crc kubenswrapper[5113]: W0130 00:11:30.688708 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode6808822_94ca_4614_a36b_9f864908f6cb.slice/crio-bfcf5ee843e224eb38571e40060fde78a82226118e12f7476513561a2bbe35db WatchSource:0}: Error finding container bfcf5ee843e224eb38571e40060fde78a82226118e12f7476513561a2bbe35db: Status 404 returned error can't find the container with id bfcf5ee843e224eb38571e40060fde78a82226118e12f7476513561a2bbe35db
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.766089 5113 certificate_manager.go:566] "Rotating certificates" logger="kubernetes.io/kubelet-serving"
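The cluster-version-operator pod mounts two hostPath volumes, a configMap, a secret, and a projected service-account token, all of which succeed above because their backing objects are already in the kubelet's cache. A hedged sketch of what such volume entries look like as client-go structs; the hostPath paths and the service-ca ConfigMap name are assumptions derived from the volume names, not taken from the log, while the secret name comes from the reflector entry above (the projected token volume is omitted for brevity):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	volumes := []corev1.Volume{
		{Name: "etc-ssl-certs", VolumeSource: corev1.VolumeSource{
			HostPath: &corev1.HostPathVolumeSource{Path: "/etc/ssl/certs"}}}, // hypothetical path
		{Name: "etc-cvo-updatepayloads", VolumeSource: corev1.VolumeSource{
			HostPath: &corev1.HostPathVolumeSource{Path: "/etc/cvo/updatepayloads"}}}, // hypothetical path
		{Name: "service-ca", VolumeSource: corev1.VolumeSource{
			ConfigMap: &corev1.ConfigMapVolumeSource{
				LocalObjectReference: corev1.LocalObjectReference{Name: "service-ca"}}}}, // name assumed
		{Name: "serving-cert", VolumeSource: corev1.VolumeSource{
			Secret: &corev1.SecretVolumeSource{SecretName: "cluster-version-operator-serving-cert"}}},
	}
	for _, v := range volumes {
		fmt.Println("volume:", v.Name)
	}
}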
Jan 30 00:11:30 crc kubenswrapper[5113]: I0130 00:11:30.778052 5113 reflector.go:430] "Caches populated" logger="kubernetes.io/kubelet-serving" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162"
Jan 30 00:11:31 crc kubenswrapper[5113]: I0130 00:11:31.332747 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56" event={"ID":"e6808822-94ca-4614-a36b-9f864908f6cb","Type":"ContainerStarted","Data":"fb680c261e5e557fda2ea8e7c0656c508effe9cf4068129cd3bb9dc09966024b"}
Jan 30 00:11:31 crc kubenswrapper[5113]: I0130 00:11:31.332879 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56" event={"ID":"e6808822-94ca-4614-a36b-9f864908f6cb","Type":"ContainerStarted","Data":"bfcf5ee843e224eb38571e40060fde78a82226118e12f7476513561a2bbe35db"}
Jan 30 00:11:31 crc kubenswrapper[5113]: I0130 00:11:31.358865 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-7c9b9cfd6-l6d56" podStartSLOduration=89.358832516 podStartE2EDuration="1m29.358832516s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:31.355452021 +0000 UTC m=+111.428057438" watchObservedRunningTime="2026-01-30 00:11:31.358832516 +0000 UTC m=+111.431437933"
Jan 30 00:11:32 crc kubenswrapper[5113]: I0130 00:11:32.967584 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-qx4gj"]
Jan 30 00:11:33 crc kubenswrapper[5113]: I0130 00:11:33.340241 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qx4gj"
Jan 30 00:11:33 crc kubenswrapper[5113]: E0130 00:11:33.340478 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qx4gj" podUID="d655d34c-2969-43f2-8e93-455507c7cfda"
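"Caches populated" marks a reflector finishing its initial list-and-watch for one object type. The equivalent pattern with a client-go shared informer (kubeconfig path and resync period are illustrative):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // illustrative path
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(cs, 10*time.Minute)
	cmInformer := factory.Core().V1().ConfigMaps().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)

	// Corresponds to the reflector's "Caches populated" log line above.
	if cache.WaitForCacheSync(stop, cmInformer.HasSynced) {
		fmt.Println("caches populated")
	}
}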
pod="openshift-multus/network-metrics-daemon-qx4gj" podUID="d655d34c-2969-43f2-8e93-455507c7cfda" Jan 30 00:11:33 crc kubenswrapper[5113]: I0130 00:11:33.772715 5113 scope.go:117] "RemoveContainer" containerID="524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef" Jan 30 00:11:34 crc kubenswrapper[5113]: I0130 00:11:34.358155 5113 generic.go:358] "Generic (PLEG): container finished" podID="6ab4dd28-7902-4bc5-959f-47cd8cebb0c1" containerID="dd0628694ff5c2f83dee048e54c6350b0e560c2501455285ec052715a3ff248a" exitCode=0 Jan 30 00:11:34 crc kubenswrapper[5113]: I0130 00:11:34.358382 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" event={"ID":"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1","Type":"ContainerDied","Data":"dd0628694ff5c2f83dee048e54c6350b0e560c2501455285ec052715a3ff248a"} Jan 30 00:11:34 crc kubenswrapper[5113]: I0130 00:11:34.361769 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/3.log" Jan 30 00:11:34 crc kubenswrapper[5113]: I0130 00:11:34.371694 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"3a14caf222afb62aaabdc47808b6f944","Type":"ContainerStarted","Data":"a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2"} Jan 30 00:11:34 crc kubenswrapper[5113]: I0130 00:11:34.772829 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:34 crc kubenswrapper[5113]: I0130 00:11:34.772834 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:34 crc kubenswrapper[5113]: I0130 00:11:34.772843 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:34 crc kubenswrapper[5113]: E0130 00:11:34.773508 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440" Jan 30 00:11:34 crc kubenswrapper[5113]: E0130 00:11:34.773626 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239" Jan 30 00:11:34 crc kubenswrapper[5113]: E0130 00:11:34.773356 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141" Jan 30 00:11:35 crc kubenswrapper[5113]: I0130 00:11:35.380395 5113 generic.go:358] "Generic (PLEG): container finished" podID="6ab4dd28-7902-4bc5-959f-47cd8cebb0c1" containerID="e8f6dc69e86f58b351cc12a3e373631a3a540a3a9304457e031c8421278bd32d" exitCode=0 Jan 30 00:11:35 crc kubenswrapper[5113]: I0130 00:11:35.380501 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" event={"ID":"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1","Type":"ContainerDied","Data":"e8f6dc69e86f58b351cc12a3e373631a3a540a3a9304457e031c8421278bd32d"} Jan 30 00:11:35 crc kubenswrapper[5113]: I0130 00:11:35.411973 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=15.411949283 podStartE2EDuration="15.411949283s" podCreationTimestamp="2026-01-30 00:11:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:35.410017492 +0000 UTC m=+115.482622939" watchObservedRunningTime="2026-01-30 00:11:35.411949283 +0000 UTC m=+115.484554690" Jan 30 00:11:35 crc kubenswrapper[5113]: I0130 00:11:35.772786 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:35 crc kubenswrapper[5113]: E0130 00:11:35.772972 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qx4gj" podUID="d655d34c-2969-43f2-8e93-455507c7cfda" Jan 30 00:11:36 crc kubenswrapper[5113]: I0130 00:11:36.545949 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:36 crc kubenswrapper[5113]: I0130 00:11:36.546023 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:36 crc kubenswrapper[5113]: I0130 00:11:36.546051 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546204 5113 secret.go:189] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546246 5113 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 00:11:36 crc kubenswrapper[5113]: I0130 00:11:36.546334 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546318 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.5462706 +0000 UTC m=+132.618875977 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546472 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546547 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546552 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546571 5113 projected.go:194] Error preparing data for projected volume kube-api-access-l7w75 for pod openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546587 5113 projected.go:289] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546589 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf podName:6a9ae5f6-97bd-46ac-bafa-ca1b4452a141 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.546458886 +0000 UTC m=+132.619064263 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf") pod "networking-console-plugin-5ff7774fd9-nljh6" (UID: "6a9ae5f6-97bd-46ac-bafa-ca1b4452a141") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546600 5113 projected.go:194] Error preparing data for projected volume kube-api-access-gwt8b for pod openshift-network-diagnostics/network-check-target-fhkjl: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546660 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75 podName:f863fff9-286a-45fa-b8f0-8a86994b8440 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.546646242 +0000 UTC m=+132.619251619 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-l7w75" (UniqueName: "kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75") pod "network-check-source-5bb8f5cd97-xdvz5" (UID: "f863fff9-286a-45fa-b8f0-8a86994b8440") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.546777 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b podName:17b87002-b798-480a-8e17-83053d698239 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.546740655 +0000 UTC m=+132.619346032 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-gwt8b" (UniqueName: "kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b") pod "network-check-target-fhkjl" (UID: "17b87002-b798-480a-8e17-83053d698239") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 00:11:36 crc kubenswrapper[5113]: I0130 00:11:36.648499 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj"
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.648808 5113 secret.go:189] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.648944 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs podName:d655d34c-2969-43f2-8e93-455507c7cfda nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.648915403 +0000 UTC m=+132.721520820 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs") pod "network-metrics-daemon-qx4gj" (UID: "d655d34c-2969-43f2-8e93-455507c7cfda") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 00:11:36 crc kubenswrapper[5113]: I0130 00:11:36.749779 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.750082 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.750039928 +0000 UTC m=+132.822645345 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:36 crc kubenswrapper[5113]: I0130 00:11:36.772776 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:11:36 crc kubenswrapper[5113]: I0130 00:11:36.772815 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6"
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.772976 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-fhkjl" podUID="17b87002-b798-480a-8e17-83053d698239"
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.773150 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" podUID="6a9ae5f6-97bd-46ac-bafa-ca1b4452a141"
Jan 30 00:11:36 crc kubenswrapper[5113]: I0130 00:11:36.773274 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5"
Jan 30 00:11:36 crc kubenswrapper[5113]: E0130 00:11:36.773499 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" podUID="f863fff9-286a-45fa-b8f0-8a86994b8440"
Jan 30 00:11:37 crc kubenswrapper[5113]: I0130 00:11:37.393489 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" event={"ID":"6ab4dd28-7902-4bc5-959f-47cd8cebb0c1","Type":"ContainerStarted","Data":"73aad1736c531dd2794e2552f4f71fcce49a74f8759deb831cf84f9abc6b286f"}
Jan 30 00:11:37 crc kubenswrapper[5113]: I0130 00:11:37.448902 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-t4r5k" podStartSLOduration=95.448877962 podStartE2EDuration="1m35.448877962s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:37.448193011 +0000 UTC m=+117.520798428" watchObservedRunningTime="2026-01-30 00:11:37.448877962 +0000 UTC m=+117.521483339"
Jan 30 00:11:37 crc kubenswrapper[5113]: I0130 00:11:37.772680 5113 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:37 crc kubenswrapper[5113]: E0130 00:11:37.772893 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qx4gj" podUID="d655d34c-2969-43f2-8e93-455507c7cfda" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.005233 5113 kubelet_node_status.go:736] "Recording event message for node" node="crc" event="NodeReady" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.005607 5113 kubelet_node_status.go:550] "Fast updating node status as it just became ready" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.051278 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.058673 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.060443 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.062252 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"config\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.062629 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"client-ca\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.063611 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.063765 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"openshift-service-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.063799 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.063980 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager\"/\"openshift-controller-manager-sa-dockercfg-djmfg\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.064069 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" Jan 30 00:11:38 crc kubenswrapper[5113]: E0130 00:11:38.067346 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-samples-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" reflector="object-\"openshift-cluster-samples-operator\"/\"openshift-service-ca.crt\"" type="*v1.ConfigMap" Jan 30 00:11:38 crc kubenswrapper[5113]: E0130 00:11:38.067684 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-cluster-samples-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" reflector="object-\"openshift-cluster-samples-operator\"/\"kube-root-ca.crt\"" type="*v1.ConfigMap" Jan 30 00:11:38 crc kubenswrapper[5113]: E0130 00:11:38.067672 5113 reflector.go:200] "Failed to watch" err="failed to list *v1.Secret: secrets \"samples-operator-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-cluster-samples-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" reflector="object-\"openshift-cluster-samples-operator\"/\"samples-operator-tls\"" type="*v1.Secret" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.068738 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-9ddfb9f55-s5qkt"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.072305 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-755bb95488-krhlw"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.073328 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.074868 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-samples-operator\"/\"cluster-samples-operator-dockercfg-jmhxf\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.077756 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"openshift-global-ca\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.083357 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"image-import-ca\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.083707 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager\"/\"serving-cert\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.086030 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"etcd-serving-ca\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.087263 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.088019 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.095398 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.105951 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-pruner-29495520-b9gz9"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.106744 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.106816 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.111922 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.131128 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.131274 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"config\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.132918 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-799b87ffcd-dbqgp"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.133813 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"etcd-client\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.133836 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"encryption-config-1\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.134005 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"openshift-apiserver-sa-dockercfg-4zqgh\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.134157 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"serving-cert\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.134314 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"kube-rbac-proxy\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.134512 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.134570 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"audit-1\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.134591 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-config\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.134659 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"trusted-ca-bundle\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.134708 5113 reflector.go:430] "Caches populated" type="*v1.Secret" 
reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-sa-dockercfg-wzhvk\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.134764 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"openshift-service-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.134780 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-tls\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.134815 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"openshift-service-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.141004 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-5777786469-kphvh"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.141943 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29495520-b9gz9" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.142514 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"config\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.142896 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"openshift-service-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.143354 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-images\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.152318 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.153304 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.153337 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"openshift-service-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.153552 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"encryption-config-1\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.153803 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-tls\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.154049 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-route-controller-manager\"/\"serving-cert\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.154430 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-route-controller-manager\"/\"route-controller-manager-sa-dockercfg-mmcpt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.154657 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"oauth-apiserver-sa-dockercfg-qqw4z\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.154780 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"kube-rbac-proxy\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.154881 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"pruner-dockercfg-rs58m\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.155001 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"serviceca\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.155093 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-dockercfg-6n5ln\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.155246 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"openshift-service-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.155236 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"client-ca\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.155361 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.155418 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"etcd-serving-ca\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.155454 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.155560 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.155711 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"trusted-ca-bundle\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.156073 
5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-console/console-64d44f6ddf-6z7rp"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.156495 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.161729 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-67c89758df-qh9jw"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.161888 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.162305 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.163205 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"audit-1\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.163795 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.163811 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"serving-cert\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.164105 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-config-operator\"/\"config-operator-serving-cert\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.164282 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns-operator\"/\"dns-operator-dockercfg-wbbsn\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.164408 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.164536 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-config-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.164663 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-config-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.164830 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-config-operator\"/\"openshift-config-operator-dockercfg-sjn6s\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.164995 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.165241 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"etcd-client\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.165296 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-dockercfg-jcmfj\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.165988 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-service-ca.crt\"" 
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.166804 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns-operator\"/\"metrics-tls\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169223 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/28e0ef1a-f823-4898-90a3-66c67c5f19eb-serviceca\") pod \"image-pruner-29495520-b9gz9\" (UID: \"28e0ef1a-f823-4898-90a3-66c67c5f19eb\") " pod="openshift-image-registry/image-pruner-29495520-b9gz9" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169280 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-config\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169305 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/44725449-ac50-4ae0-935a-4d70c1a921f1-audit-dir\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169326 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16a06343-d795-4c00-8684-13e9158ab544-serving-cert\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169347 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-proxy-ca-bundles\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169361 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/4669eb3c-24d5-4643-91d1-de96326757fa-tmp\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169380 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnnjj\" (UniqueName: \"kubernetes.io/projected/d334727d-3c8b-4f75-a3ea-a4b537fe480c-kube-api-access-gnnjj\") pod \"dns-operator-799b87ffcd-dbqgp\" (UID: \"d334727d-3c8b-4f75-a3ea-a4b537fe480c\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169397 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44725449-ac50-4ae0-935a-4d70c1a921f1-trusted-ca-bundle\") pod 
\"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169417 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-config\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169437 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pwvn\" (UniqueName: \"kubernetes.io/projected/37c8062b-4496-46f0-9562-9f9d27740557-kube-api-access-7pwvn\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169451 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d334727d-3c8b-4f75-a3ea-a4b537fe480c-metrics-tls\") pod \"dns-operator-799b87ffcd-dbqgp\" (UID: \"d334727d-3c8b-4f75-a3ea-a4b537fe480c\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169469 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/0c156d1d-c9e6-43e7-b515-7a9314879127-machine-api-operator-tls\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169489 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-image-import-ca\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169504 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d334727d-3c8b-4f75-a3ea-a4b537fe480c-tmp-dir\") pod \"dns-operator-799b87ffcd-dbqgp\" (UID: \"d334727d-3c8b-4f75-a3ea-a4b537fe480c\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169543 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bf7t\" (UniqueName: \"kubernetes.io/projected/2554dafb-e152-489f-a585-bfe5638c0b82-kube-api-access-6bf7t\") pod \"openshift-config-operator-5777786469-kphvh\" (UID: \"2554dafb-e152-489f-a585-bfe5638c0b82\") " pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169573 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/37c8062b-4496-46f0-9562-9f9d27740557-node-pullsecrets\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " 
pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169593 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/c6737a55-918a-4c58-ac84-4e1f78ddff5e-machine-approver-tls\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169610 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/37c8062b-4496-46f0-9562-9f9d27740557-etcd-client\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169627 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-trusted-ca-bundle\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169645 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16a06343-d795-4c00-8684-13e9158ab544-config\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169668 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlbg8\" (UniqueName: \"kubernetes.io/projected/ffde9a75-3edf-462f-af90-c312c4f05986-kube-api-access-jlbg8\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169701 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2554dafb-e152-489f-a585-bfe5638c0b82-serving-cert\") pod \"openshift-config-operator-5777786469-kphvh\" (UID: \"2554dafb-e152-489f-a585-bfe5638c0b82\") " pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169718 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/2554dafb-e152-489f-a585-bfe5638c0b82-available-featuregates\") pod \"openshift-config-operator-5777786469-kphvh\" (UID: \"2554dafb-e152-489f-a585-bfe5638c0b82\") " pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169735 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvbqt\" (UniqueName: \"kubernetes.io/projected/28e0ef1a-f823-4898-90a3-66c67c5f19eb-kube-api-access-nvbqt\") pod \"image-pruner-29495520-b9gz9\" (UID: \"28e0ef1a-f823-4898-90a3-66c67c5f19eb\") " 
pod="openshift-image-registry/image-pruner-29495520-b9gz9" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169787 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhkx7\" (UniqueName: \"kubernetes.io/projected/c6737a55-918a-4c58-ac84-4e1f78ddff5e-kube-api-access-nhkx7\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169806 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/44725449-ac50-4ae0-935a-4d70c1a921f1-etcd-client\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169828 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-client-ca\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169846 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/44725449-ac50-4ae0-935a-4d70c1a921f1-serving-cert\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169870 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd6gr\" (UniqueName: \"kubernetes.io/projected/0c156d1d-c9e6-43e7-b515-7a9314879127-kube-api-access-pd6gr\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169893 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0c156d1d-c9e6-43e7-b515-7a9314879127-images\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169909 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9zkh5\" (UniqueName: \"kubernetes.io/projected/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-kube-api-access-9zkh5\") pod \"cluster-samples-operator-6b564684c8-44zv4\" (UID: \"c4af1b2c-aad9-48c2-b1d7-36cd069c556d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169929 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-client-ca\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " 
pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169943 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c156d1d-c9e6-43e7-b515-7a9314879127-config\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169958 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-config\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169976 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4669eb3c-24d5-4643-91d1-de96326757fa-serving-cert\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.169998 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c6737a55-918a-4c58-ac84-4e1f78ddff5e-auth-proxy-config\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170024 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6737a55-918a-4c58-ac84-4e1f78ddff5e-config\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170049 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-audit\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170068 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jf8mr\" (UniqueName: \"kubernetes.io/projected/44725449-ac50-4ae0-935a-4d70c1a921f1-kube-api-access-jf8mr\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170093 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/44725449-ac50-4ae0-935a-4d70c1a921f1-etcd-serving-ca\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170114 5113 
reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vd9zl\" (UniqueName: \"kubernetes.io/projected/16a06343-d795-4c00-8684-13e9158ab544-kube-api-access-vd9zl\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170133 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ffde9a75-3edf-462f-af90-c312c4f05986-tmp\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170178 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37c8062b-4496-46f0-9562-9f9d27740557-serving-cert\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170197 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/44725449-ac50-4ae0-935a-4d70c1a921f1-encryption-config\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170224 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/44725449-ac50-4ae0-935a-4d70c1a921f1-audit-policies\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170247 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbf8b\" (UniqueName: \"kubernetes.io/projected/4669eb3c-24d5-4643-91d1-de96326757fa-kube-api-access-xbf8b\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170268 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/16a06343-d795-4c00-8684-13e9158ab544-tmp\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170296 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-samples-operator-tls\") pod \"cluster-samples-operator-6b564684c8-44zv4\" (UID: \"c4af1b2c-aad9-48c2-b1d7-36cd069c556d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170313 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-etcd-serving-ca\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170340 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffde9a75-3edf-462f-af90-c312c4f05986-serving-cert\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170357 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/37c8062b-4496-46f0-9562-9f9d27740557-encryption-config\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170374 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/37c8062b-4496-46f0-9562-9f9d27740557-audit-dir\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.170694 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.175478 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-config\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.175879 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"openshift-service-ca.crt\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.176118 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"oauth-serving-cert\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.176261 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-serving-cert\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.176436 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"kube-root-ca.crt\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.176648 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-oauth-config\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.176789 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-dockercfg-8dkm8\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.177444 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.178461 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-67c89758df-qh9jw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.188508 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"console-config\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.188722 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"service-ca\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.189250 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-serving-cert\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.192028 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"trusted-ca-bundle\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.199583 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-747b44746d-kxkvb"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.201712 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.203394 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.213485 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"console-operator-config\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.234303 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console-operator\"/\"console-operator-dockercfg-kl6m8\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.234426 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.237491 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console-operator\"/\"serving-cert\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.238588 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"kube-root-ca.crt\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.238664 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.241402 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.242231 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"trusted-ca\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.243504 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.246687 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-operator\"/\"ingress-operator-dockercfg-74nwh\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.246829 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication-operator\"/\"authentication-operator-dockercfg-6tbpn\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.246865 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-operator\"/\"metrics-tls\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.246928 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"authentication-operator-config\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.248371 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication-operator\"/\"serving-cert\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.248885 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"kube-root-ca.crt\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.249252 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"service-ca-bundle\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.249432 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"kube-root-ca.crt\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.249837 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-bnj47"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.249962 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-747b44746d-kxkvb"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.250041 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.255903 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"cluster-image-registry-operator-dockercfg-ntnd7\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.256139 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"default-dockercfg-mdwwj\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.259256 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"trusted-ca\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.260816 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"image-registry-operator-tls\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.265690 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"trusted-ca-bundle\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.266013 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"trusted-ca\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.266012 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-66458b6674-65wnm"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.266170 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.273006 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"installation-pull-secrets\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.273371 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.273865 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"registry-dockercfg-6w67b\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.273008 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.273399 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"image-registry-tls\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.276986 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-service-ca\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.277495 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-provider-selection\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.278693 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-login\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.278855 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4669eb3c-24d5-4643-91d1-de96326757fa-serving-cert\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.278888 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c6737a55-918a-4c58-ac84-4e1f78ddff5e-auth-proxy-config\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.278916 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6737a55-918a-4c58-ac84-4e1f78ddff5e-config\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.278942 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-audit\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.278968 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-jf8mr\" (UniqueName: \"kubernetes.io/projected/44725449-ac50-4ae0-935a-4d70c1a921f1-kube-api-access-jf8mr\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.278993 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-console-serving-cert\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279013 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/44725449-ac50-4ae0-935a-4d70c1a921f1-etcd-serving-ca\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279030 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-vd9zl\" (UniqueName: \"kubernetes.io/projected/16a06343-d795-4c00-8684-13e9158ab544-kube-api-access-vd9zl\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279048 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-router-certs\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279054 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-trusted-ca\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279072 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-bound-sa-token\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279090 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/11b1745f-9b78-40f8-bb20-b2c2590e4f46-bound-sa-token\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279110 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ffde9a75-3edf-462f-af90-c312c4f05986-tmp\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279129 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-service-ca\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279147 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41f05909-fc3b-4e9f-85e5-4df36ec3b431-config\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279163 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted-pem\" (UniqueName: \"kubernetes.io/empty-dir/11b1745f-9b78-40f8-bb20-b2c2590e4f46-ca-trust-extracted-pem\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279183 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-trusted-ca-bundle\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279201 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37c8062b-4496-46f0-9562-9f9d27740557-serving-cert\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279221 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/44725449-ac50-4ae0-935a-4d70c1a921f1-encryption-config\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279396 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-serving-cert\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279447 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/44725449-ac50-4ae0-935a-4d70c1a921f1-audit-policies\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279472 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-oauth-serving-cert\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279503 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-xbf8b\" (UniqueName: \"kubernetes.io/projected/4669eb3c-24d5-4643-91d1-de96326757fa-kube-api-access-xbf8b\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279540 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-console-oauth-config\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279585 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-metrics-tls\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279615 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/16a06343-d795-4c00-8684-13e9158ab544-tmp\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.279643 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-samples-operator-tls\") pod \"cluster-samples-operator-6b564684c8-44zv4\" (UID: \"c4af1b2c-aad9-48c2-b1d7-36cd069c556d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280183 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ffde9a75-3edf-462f-af90-c312c4f05986-tmp\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280229 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-etcd-serving-ca\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280262 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scpsz\" (UniqueName: \"kubernetes.io/projected/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-kube-api-access-scpsz\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280311 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffde9a75-3edf-462f-af90-c312c4f05986-serving-cert\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280357 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/37c8062b-4496-46f0-9562-9f9d27740557-encryption-config\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280382 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/37c8062b-4496-46f0-9562-9f9d27740557-audit-dir\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280401 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/28e0ef1a-f823-4898-90a3-66c67c5f19eb-serviceca\") pod \"image-pruner-29495520-b9gz9\" (UID: \"28e0ef1a-f823-4898-90a3-66c67c5f19eb\") " pod="openshift-image-registry/image-pruner-29495520-b9gz9"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280435 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-config\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280455 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/44725449-ac50-4ae0-935a-4d70c1a921f1-audit-dir\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280477 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16a06343-d795-4c00-8684-13e9158ab544-serving-cert\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280497 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pww6f\" (UniqueName: \"kubernetes.io/projected/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-kube-api-access-pww6f\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280624 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-proxy-ca-bundles\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280653 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/4669eb3c-24d5-4643-91d1-de96326757fa-tmp\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280673 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gnnjj\" (UniqueName: \"kubernetes.io/projected/d334727d-3c8b-4f75-a3ea-a4b537fe480c-kube-api-access-gnnjj\") pod \"dns-operator-799b87ffcd-dbqgp\" (UID: \"d334727d-3c8b-4f75-a3ea-a4b537fe480c\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280697 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44725449-ac50-4ae0-935a-4d70c1a921f1-trusted-ca-bundle\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280750 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-config\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280770 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-7pwvn\" (UniqueName: \"kubernetes.io/projected/37c8062b-4496-46f0-9562-9f9d27740557-kube-api-access-7pwvn\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280788 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d334727d-3c8b-4f75-a3ea-a4b537fe480c-metrics-tls\") pod \"dns-operator-799b87ffcd-dbqgp\" (UID: \"d334727d-3c8b-4f75-a3ea-a4b537fe480c\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280809 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/0c156d1d-c9e6-43e7-b515-7a9314879127-machine-api-operator-tls\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280843 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-image-import-ca\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280861 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d334727d-3c8b-4f75-a3ea-a4b537fe480c-tmp-dir\") pod \"dns-operator-799b87ffcd-dbqgp\" (UID: \"d334727d-3c8b-4f75-a3ea-a4b537fe480c\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280938 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-6bf7t\" (UniqueName: \"kubernetes.io/projected/2554dafb-e152-489f-a585-bfe5638c0b82-kube-api-access-6bf7t\") pod \"openshift-config-operator-5777786469-kphvh\" (UID: \"2554dafb-e152-489f-a585-bfe5638c0b82\") " pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280971 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/37c8062b-4496-46f0-9562-9f9d27740557-node-pullsecrets\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.280988 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/c6737a55-918a-4c58-ac84-4e1f78ddff5e-machine-approver-tls\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281009 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hj26t\" (UniqueName: \"kubernetes.io/projected/11b1745f-9b78-40f8-bb20-b2c2590e4f46-kube-api-access-hj26t\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281074 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/37c8062b-4496-46f0-9562-9f9d27740557-etcd-client\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281098 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-trusted-ca-bundle\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281134 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16a06343-d795-4c00-8684-13e9158ab544-config\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281159 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41f05909-fc3b-4e9f-85e5-4df36ec3b431-trusted-ca-bundle\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281180 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41f05909-fc3b-4e9f-85e5-4df36ec3b431-service-ca-bundle\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281197 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-config\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281216 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-jlbg8\" (UniqueName: \"kubernetes.io/projected/ffde9a75-3edf-462f-af90-c312c4f05986-kube-api-access-jlbg8\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281257 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2554dafb-e152-489f-a585-bfe5638c0b82-serving-cert\") pod \"openshift-config-operator-5777786469-kphvh\" (UID: \"2554dafb-e152-489f-a585-bfe5638c0b82\") " pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281273 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/2554dafb-e152-489f-a585-bfe5638c0b82-available-featuregates\") pod \"openshift-config-operator-5777786469-kphvh\" (UID: \"2554dafb-e152-489f-a585-bfe5638c0b82\") " pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281291 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-nvbqt\" (UniqueName: \"kubernetes.io/projected/28e0ef1a-f823-4898-90a3-66c67c5f19eb-kube-api-access-nvbqt\") pod \"image-pruner-29495520-b9gz9\" (UID: \"28e0ef1a-f823-4898-90a3-66c67c5f19eb\") " pod="openshift-image-registry/image-pruner-29495520-b9gz9"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281312 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-console-config\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281330 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-trusted-ca\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281354 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/11b1745f-9b78-40f8-bb20-b2c2590e4f46-image-registry-operator-tls\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281384 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-nhkx7\" (UniqueName: \"kubernetes.io/projected/c6737a55-918a-4c58-ac84-4e1f78ddff5e-kube-api-access-nhkx7\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281402 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/44725449-ac50-4ae0-935a-4d70c1a921f1-etcd-client\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281447 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pt4xg\" (UniqueName: \"kubernetes.io/projected/41f05909-fc3b-4e9f-85e5-4df36ec3b431-kube-api-access-pt4xg\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281467 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8zc5\" (UniqueName: \"kubernetes.io/projected/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-kube-api-access-d8zc5\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281484 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/11b1745f-9b78-40f8-bb20-b2c2590e4f46-tmp\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281507 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-client-ca\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281539 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/44725449-ac50-4ae0-935a-4d70c1a921f1-serving-cert\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281556 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41f05909-fc3b-4e9f-85e5-4df36ec3b431-serving-cert\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281573 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwrx4\" (UniqueName: \"kubernetes.io/projected/d6483c17-196a-4e41-8950-46d60c5505c9-kube-api-access-qwrx4\") pod \"downloads-747b44746d-kxkvb\" (UID: \"d6483c17-196a-4e41-8950-46d60c5505c9\") " pod="openshift-console/downloads-747b44746d-kxkvb"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281597 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-pd6gr\" (UniqueName: \"kubernetes.io/projected/0c156d1d-c9e6-43e7-b515-7a9314879127-kube-api-access-pd6gr\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281621 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0c156d1d-c9e6-43e7-b515-7a9314879127-images\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281639 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-9zkh5\" (UniqueName: \"kubernetes.io/projected/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-kube-api-access-9zkh5\") pod \"cluster-samples-operator-6b564684c8-44zv4\" (UID: \"c4af1b2c-aad9-48c2-b1d7-36cd069c556d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.281657 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/11b1745f-9b78-40f8-bb20-b2c2590e4f46-trusted-ca\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.282179 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-client-ca\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.282206 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c156d1d-c9e6-43e7-b515-7a9314879127-config\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.282232 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-config\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.293836 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-etcd-serving-ca\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.294540 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/37c8062b-4496-46f0-9562-9f9d27740557-audit-dir\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.295244 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.295351 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/28e0ef1a-f823-4898-90a3-66c67c5f19eb-serviceca\") pod \"image-pruner-29495520-b9gz9\" (UID: \"28e0ef1a-f823-4898-90a3-66c67c5f19eb\") " pod="openshift-image-registry/image-pruner-29495520-b9gz9"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.295836 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.296303 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-image-import-ca\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.296437 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-config\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.296483 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/44725449-ac50-4ae0-935a-4d70c1a921f1-audit-dir\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.297434 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/2554dafb-e152-489f-a585-bfe5638c0b82-available-featuregates\") pod \"openshift-config-operator-5777786469-kphvh\" (UID: \"2554dafb-e152-489f-a585-bfe5638c0b82\") " pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.298098 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/44725449-ac50-4ae0-935a-4d70c1a921f1-trusted-ca-bundle\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.298243 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d334727d-3c8b-4f75-a3ea-a4b537fe480c-tmp-dir\") pod \"dns-operator-799b87ffcd-dbqgp\" (UID: \"d334727d-3c8b-4f75-a3ea-a4b537fe480c\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.298575 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/4669eb3c-24d5-4643-91d1-de96326757fa-tmp\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.299943 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-proxy-ca-bundles\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.301338 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/16a06343-d795-4c00-8684-13e9158ab544-serving-cert\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.301335 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/c6737a55-918a-4c58-ac84-4e1f78ddff5e-machine-approver-tls\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.302101 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-client-ca\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.302596 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-config\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.302639 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/d334727d-3c8b-4f75-a3ea-a4b537fe480c-metrics-tls\") pod \"dns-operator-799b87ffcd-dbqgp\" (UID: \"d334727d-3c8b-4f75-a3ea-a4b537fe480c\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.303047 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/37c8062b-4496-46f0-9562-9f9d27740557-node-pullsecrets\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.303644 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0c156d1d-c9e6-43e7-b515-7a9314879127-images\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.303768 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-client-ca\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.304258 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c156d1d-c9e6-43e7-b515-7a9314879127-config\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.304402 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-config\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.307345 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-trusted-ca-bundle\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.308490 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/16a06343-d795-4c00-8684-13e9158ab544-config\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.308867 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/37c8062b-4496-46f0-9562-9f9d27740557-etcd-client\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.310028 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/c6737a55-918a-4c58-ac84-4e1f78ddff5e-auth-proxy-config\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.310123 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-idp-0-file-data\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.310699 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6737a55-918a-4c58-ac84-4e1f78ddff5e-config\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.311144 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/37c8062b-4496-46f0-9562-9f9d27740557-audit\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.311815 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/44725449-ac50-4ae0-935a-4d70c1a921f1-etcd-serving-ca\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.312611 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/44725449-ac50-4ae0-935a-4d70c1a921f1-audit-policies\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.313145 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/16a06343-d795-4c00-8684-13e9158ab544-tmp\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.313358 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-error\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.315004 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/37c8062b-4496-46f0-9562-9f9d27740557-serving-cert\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.316000 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffde9a75-3edf-462f-af90-c312c4f05986-serving-cert\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.316097 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/44725449-ac50-4ae0-935a-4d70c1a921f1-etcd-client\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.320792 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/37c8062b-4496-46f0-9562-9f9d27740557-encryption-config\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.322845 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2554dafb-e152-489f-a585-bfe5638c0b82-serving-cert\") pod \"openshift-config-operator-5777786469-kphvh\" (UID: \"2554dafb-e152-489f-a585-bfe5638c0b82\") " pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.325225 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/44725449-ac50-4ae0-935a-4d70c1a921f1-encryption-config\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.326985 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"oauth-openshift-dockercfg-d2bf2\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.327311 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/0c156d1d-c9e6-43e7-b515-7a9314879127-machine-api-operator-tls\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.330595 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4669eb3c-24d5-4643-91d1-de96326757fa-serving-cert\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.330696 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/44725449-ac50-4ae0-935a-4d70c1a921f1-serving-cert\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.332020 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-68cf44c8b8-pdqxh"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.343960 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-ocp-branding-template\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.344413 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.347824 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-session\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.358475 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-69b85846b6-t984r"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.358705 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.362258 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-serving-cert\""
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.364319 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.364628 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r"
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.368404 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k"]
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.368509 5113 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.371555 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.371871 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.375708 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.375735 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.375873 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.378878 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.379113 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.383202 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-trusted-ca-bundle\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.383246 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-serving-cert\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.383272 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-oauth-serving-cert\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.383293 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-console-oauth-config\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.383315 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-metrics-tls\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq" 
Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.383340 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-scpsz\" (UniqueName: \"kubernetes.io/projected/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-kube-api-access-scpsz\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384021 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-pww6f\" (UniqueName: \"kubernetes.io/projected/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-kube-api-access-pww6f\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384185 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-hj26t\" (UniqueName: \"kubernetes.io/projected/11b1745f-9b78-40f8-bb20-b2c2590e4f46-kube-api-access-hj26t\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384275 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41f05909-fc3b-4e9f-85e5-4df36ec3b431-trusted-ca-bundle\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384315 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41f05909-fc3b-4e9f-85e5-4df36ec3b431-service-ca-bundle\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384345 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-config\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384355 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-oauth-serving-cert\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384423 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-trusted-ca-bundle\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384545 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"console-config\" 
(UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-console-config\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384580 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-trusted-ca\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384602 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/11b1745f-9b78-40f8-bb20-b2c2590e4f46-image-registry-operator-tls\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384644 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-pt4xg\" (UniqueName: \"kubernetes.io/projected/41f05909-fc3b-4e9f-85e5-4df36ec3b431-kube-api-access-pt4xg\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384666 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-d8zc5\" (UniqueName: \"kubernetes.io/projected/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-kube-api-access-d8zc5\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384685 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/11b1745f-9b78-40f8-bb20-b2c2590e4f46-tmp\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384708 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41f05909-fc3b-4e9f-85e5-4df36ec3b431-serving-cert\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384725 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-qwrx4\" (UniqueName: \"kubernetes.io/projected/d6483c17-196a-4e41-8950-46d60c5505c9-kube-api-access-qwrx4\") pod \"downloads-747b44746d-kxkvb\" (UID: \"d6483c17-196a-4e41-8950-46d60c5505c9\") " pod="openshift-console/downloads-747b44746d-kxkvb" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384754 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/11b1745f-9b78-40f8-bb20-b2c2590e4f46-trusted-ca\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: 
\"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384803 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-console-serving-cert\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384825 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-trusted-ca\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384842 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-bound-sa-token\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384858 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/11b1745f-9b78-40f8-bb20-b2c2590e4f46-bound-sa-token\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384877 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-service-ca\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384899 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41f05909-fc3b-4e9f-85e5-4df36ec3b431-config\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.384916 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-trust-extracted-pem\" (UniqueName: \"kubernetes.io/empty-dir/11b1745f-9b78-40f8-bb20-b2c2590e4f46-ca-trust-extracted-pem\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.385193 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41f05909-fc3b-4e9f-85e5-4df36ec3b431-service-ca-bundle\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.385404 5113 
operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-console-config\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.385490 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/41f05909-fc3b-4e9f-85e5-4df36ec3b431-trusted-ca-bundle\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.385612 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-config\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.386217 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted-pem\" (UniqueName: \"kubernetes.io/empty-dir/11b1745f-9b78-40f8-bb20-b2c2590e4f46-ca-trust-extracted-pem\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.386989 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/11b1745f-9b78-40f8-bb20-b2c2590e4f46-tmp\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.387086 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/11b1745f-9b78-40f8-bb20-b2c2590e4f46-trusted-ca\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.387263 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-trusted-ca\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.388263 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-service-ca\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.388322 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-trusted-ca\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " 
pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.391824 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-trusted-ca-bundle\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.392736 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.392923 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.396220 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41f05909-fc3b-4e9f-85e5-4df36ec3b431-config\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.396338 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/11b1745f-9b78-40f8-bb20-b2c2590e4f46-image-registry-operator-tls\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.396468 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-console-serving-cert\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.396553 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-serving-cert\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.397200 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-console-oauth-config\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.400289 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41f05909-fc3b-4e9f-85e5-4df36ec3b431-serving-cert\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.401867 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.408508 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg"] Jan 30 
00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.409870 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.417326 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.417990 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.421076 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-metrics-tls\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.422065 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"openshift-service-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.427214 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.427514 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.435802 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.436019 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.441180 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"audit\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.441442 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.444801 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-69db94689b-zr7d6"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.444896 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.445046 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.449436 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.452641 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.460826 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-cliconfig\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.472299 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.473063 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.504917 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-vd9zl\" (UniqueName: \"kubernetes.io/projected/16a06343-d795-4c00-8684-13e9158ab544-kube-api-access-vd9zl\") pod \"openshift-controller-manager-operator-686468bdd5-cddd8\" (UID: \"16a06343-d795-4c00-8684-13e9158ab544\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.513580 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-qljgk"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.514012 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.523504 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.541853 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-dockercfg-bf7fj\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.552249 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-74545575db-bv8pp"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.552418 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.561715 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-serving-cert\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.581556 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-config\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.593086 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-755bb95488-krhlw"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.593125 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.593140 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-799b87ffcd-dbqgp"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.593156 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-5777786469-kphvh"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.593169 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.593184 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-64d44f6ddf-6z7rp"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.593198 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-9ddfb9f55-s5qkt"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.593209 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.593221 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-67c89758df-qh9jw"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.593234 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-9wphq"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.593346 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.599541 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-66458b6674-65wnm"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.599571 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.599585 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-l5rrt"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.599820 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.603069 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-hzktp"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.606225 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-b5d9m"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.606351 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.606451 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-hzktp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613670 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613709 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613724 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613741 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-747b44746d-kxkvb"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613753 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29495520-b9gz9"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613764 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613777 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613789 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-69b85846b6-t984r"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613809 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613818 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613830 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613846 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-smc9m"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.613861 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.615009 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621632 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621660 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-bnj47"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621671 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621680 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621754 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621841 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621853 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-hzktp"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621862 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621871 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9wphq"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621904 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621914 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-69db94689b-zr7d6"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621928 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621936 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-qljgk"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621944 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621971 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-l5rrt"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621983 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.621993 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-74545575db-bv8pp"] Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.625005 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-smc9m" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.632193 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlbg8\" (UniqueName: \"kubernetes.io/projected/ffde9a75-3edf-462f-af90-c312c4f05986-kube-api-access-jlbg8\") pod \"controller-manager-65b6cccf98-8rbrn\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") " pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.638655 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhkx7\" (UniqueName: \"kubernetes.io/projected/c6737a55-918a-4c58-ac84-4e1f78ddff5e-kube-api-access-nhkx7\") pod \"machine-approver-54c688565-x4v7p\" (UID: \"c6737a55-918a-4c58-ac84-4e1f78ddff5e\") " pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.673100 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pwvn\" (UniqueName: \"kubernetes.io/projected/37c8062b-4496-46f0-9562-9f9d27740557-kube-api-access-7pwvn\") pod \"apiserver-9ddfb9f55-s5qkt\" (UID: \"37c8062b-4496-46f0-9562-9f9d27740557\") " pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.677396 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.681496 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bf7t\" (UniqueName: \"kubernetes.io/projected/2554dafb-e152-489f-a585-bfe5638c0b82-kube-api-access-6bf7t\") pod \"openshift-config-operator-5777786469-kphvh\" (UID: \"2554dafb-e152-489f-a585-bfe5638c0b82\") " pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.715086 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-pd6gr\" (UniqueName: \"kubernetes.io/projected/0c156d1d-c9e6-43e7-b515-7a9314879127-kube-api-access-pd6gr\") pod \"machine-api-operator-755bb95488-krhlw\" (UID: \"0c156d1d-c9e6-43e7-b515-7a9314879127\") " pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.719411 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnnjj\" (UniqueName: \"kubernetes.io/projected/d334727d-3c8b-4f75-a3ea-a4b537fe480c-kube-api-access-gnnjj\") pod \"dns-operator-799b87ffcd-dbqgp\" (UID: \"d334727d-3c8b-4f75-a3ea-a4b537fe480c\") " pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.731872 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.736670 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.762109 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-jf8mr\" (UniqueName: \"kubernetes.io/projected/44725449-ac50-4ae0-935a-4d70c1a921f1-kube-api-access-jf8mr\") pod \"apiserver-8596bd845d-9swvg\" (UID: \"44725449-ac50-4ae0-935a-4d70c1a921f1\") " pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.763289 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.772818 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.773579 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.773728 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.786786 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbf8b\" (UniqueName: \"kubernetes.io/projected/4669eb3c-24d5-4643-91d1-de96326757fa-kube-api-access-xbf8b\") pod \"route-controller-manager-776cdc94d6-99ddj\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") " pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.790863 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.801177 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-dockercfg-6c46w\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.821669 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-config\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.822283 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvbqt\" (UniqueName: \"kubernetes.io/projected/28e0ef1a-f823-4898-90a3-66c67c5f19eb-kube-api-access-nvbqt\") pod \"image-pruner-29495520-b9gz9\" (UID: \"28e0ef1a-f823-4898-90a3-66c67c5f19eb\") " pod="openshift-image-registry/image-pruner-29495520-b9gz9" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.841937 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-serving-cert\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.847514 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.854816 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-pruner-29495520-b9gz9" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.863844 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.864922 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.887054 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.916245 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"openshift-service-ca.crt\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.931993 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-metrics-certs-default\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.941169 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-dockercfg-kw8fx\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.965745 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-certs-default\"" Jan 30 00:11:38 crc kubenswrapper[5113]: I0130 00:11:38.982250 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-stats-default\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.003551 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"service-ca-bundle\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.022742 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"kube-root-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.063596 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.078811 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.082927 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-serving-cert\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.105132 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-client\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.122469 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-config\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.141988 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.162475 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-service-ca-bundle\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.183347 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-ca-bundle\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.203657 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-dockercfg-4vdnc\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.225759 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-dockercfg-tnfx9\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.248144 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.268075 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-serving-cert\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.282320 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-config\"" Jan 30 00:11:39 crc kubenswrapper[5113]: E0130 00:11:39.293104 5113 secret.go:189] Couldn't get secret openshift-cluster-samples-operator/samples-operator-tls: failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:39 crc kubenswrapper[5113]: E0130 00:11:39.293218 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-samples-operator-tls podName:c4af1b2c-aad9-48c2-b1d7-36cd069c556d nodeName:}" failed. No retries permitted until 2026-01-30 00:11:39.793195991 +0000 UTC m=+119.865801368 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "samples-operator-tls" (UniqueName: "kubernetes.io/secret/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-samples-operator-tls") pod "cluster-samples-operator-6b564684c8-44zv4" (UID: "c4af1b2c-aad9-48c2-b1d7-36cd069c556d") : failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.316152 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-config\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.325036 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler-operator\"/\"kube-scheduler-operator-serving-cert\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.342543 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.362243 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-dockercfg-2wbn2\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.379107 5113 request.go:752] "Waited before sending request" delay="1.003025914s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-storage-version-migrator/configmaps?fieldSelector=metadata.name%3Dopenshift-service-ca.crt&limit=500&resourceVersion=0" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.380857 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.405070 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator\"/\"kube-storage-version-migrator-sa-dockercfg-kknhg\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.418887 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p" event={"ID":"c6737a55-918a-4c58-ac84-4e1f78ddff5e","Type":"ContainerStarted","Data":"a2137f1c517dd8034864c970151fdaa77b64b44cda9c81f97bbaccffa8dfbe3b"} Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.421579 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator\"/\"kube-root-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.442884 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"control-plane-machine-set-operator-tls\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.466889 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"control-plane-machine-set-operator-dockercfg-gnx66\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.508352 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-scpsz\" (UniqueName: \"kubernetes.io/projected/fde32e16-bb98-4b8e-9e25-fdaf5b8df655-kube-api-access-scpsz\") pod \"console-operator-67c89758df-qh9jw\" (UID: \"fde32e16-bb98-4b8e-9e25-fdaf5b8df655\") " pod="openshift-console-operator/console-operator-67c89758df-qh9jw" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.525251 5113 
Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.525251 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-pww6f\" (UniqueName: \"kubernetes.io/projected/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-kube-api-access-pww6f\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq"
Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.538561 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-hj26t\" (UniqueName: \"kubernetes.io/projected/11b1745f-9b78-40f8-bb20-b2c2590e4f46-kube-api-access-hj26t\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4"
Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.558766 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-pt4xg\" (UniqueName: \"kubernetes.io/projected/41f05909-fc3b-4e9f-85e5-4df36ec3b431-kube-api-access-pt4xg\") pod \"authentication-operator-7f5c659b84-twdjp\" (UID: \"41f05909-fc3b-4e9f-85e5-4df36ec3b431\") " pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp"
Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.568047 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-67c89758df-qh9jw"
Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.576985 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8zc5\" (UniqueName: \"kubernetes.io/projected/a5ca6cf7-be11-4cd5-952b-b890c7e3b26e-kube-api-access-d8zc5\") pod \"console-64d44f6ddf-6z7rp\" (UID: \"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e\") " pod="openshift-console/console-64d44f6ddf-6z7rp"
Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.583005 5113 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.602645 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwrx4\" (UniqueName: \"kubernetes.io/projected/d6483c17-196a-4e41-8950-46d60c5505c9-kube-api-access-qwrx4\") pod \"downloads-747b44746d-kxkvb\" (UID: \"d6483c17-196a-4e41-8950-46d60c5505c9\") " pod="openshift-console/downloads-747b44746d-kxkvb" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.618646 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/11b1745f-9b78-40f8-bb20-b2c2590e4f46-bound-sa-token\") pod \"cluster-image-registry-operator-86c45576b9-trfl4\" (UID: \"11b1745f-9b78-40f8-bb20-b2c2590e4f46\") " pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.644984 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"openshift-service-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.652142 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c-bound-sa-token\") pod \"ingress-operator-6b9cb4dbcf-5cnkq\" (UID: \"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c\") " pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.662017 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"olm-operator-serviceaccount-dockercfg-4gqzj\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.682470 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"olm-operator-serving-cert\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.701863 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"pprof-cert\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.726779 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"kube-root-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5113]: E0130 00:11:39.733296 5113 projected.go:289] Couldn't get configMap openshift-cluster-samples-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.742953 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"packageserver-service-cert\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.754623 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-799b87ffcd-dbqgp"] Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.755712 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-5777786469-kphvh"] Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.758640 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"] Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.760025 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" 
pods=["openshift-image-registry/image-pruner-29495520-b9gz9"] Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.770616 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-config\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.771926 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.775894 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8"] Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.778289 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-755bb95488-krhlw"] Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.778363 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"] Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.783334 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-8596bd845d-9swvg"] Jan 30 00:11:39 crc kubenswrapper[5113]: W0130 00:11:39.783853 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28e0ef1a_f823_4898_90a3_66c67c5f19eb.slice/crio-4858e0bc850dd7c0be02a0f21b6e24d5fc618404624f988ad4cb17ecbfbbbae8 WatchSource:0}: Error finding container 4858e0bc850dd7c0be02a0f21b6e24d5fc618404624f988ad4cb17ecbfbbbae8: Status 404 returned error can't find the container with id 4858e0bc850dd7c0be02a0f21b6e24d5fc618404624f988ad4cb17ecbfbbbae8 Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.784246 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-dockercfg-vfqp6\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.784510 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-9ddfb9f55-s5qkt"] Jan 30 00:11:39 crc kubenswrapper[5113]: W0130 00:11:39.785417 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2554dafb_e152_489f_a585_bfe5638c0b82.slice/crio-efbe518e9adaac0338289bacbb9e42a73343b0110a55d31f608f6ddb33a83816 WatchSource:0}: Error finding container efbe518e9adaac0338289bacbb9e42a73343b0110a55d31f608f6ddb33a83816: Status 404 returned error can't find the container with id efbe518e9adaac0338289bacbb9e42a73343b0110a55d31f608f6ddb33a83816 Jan 30 00:11:39 crc kubenswrapper[5113]: W0130 00:11:39.792394 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd334727d_3c8b_4f75_a3ea_a4b537fe480c.slice/crio-caa522a44e0730f1392f89ddd4e305bb0c8c8eeacf04566c105e32790b9bf2ba WatchSource:0}: Error finding container caa522a44e0730f1392f89ddd4e305bb0c8c8eeacf04566c105e32790b9bf2ba: Status 404 returned error can't find the container with id caa522a44e0730f1392f89ddd4e305bb0c8c8eeacf04566c105e32790b9bf2ba Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.808210 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"package-server-manager-serving-cert\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.809976 5113 kubelet.go:2544] 
"SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-67c89758df-qh9jw"] Jan 30 00:11:39 crc kubenswrapper[5113]: W0130 00:11:39.820396 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod16a06343_d795_4c00_8684_13e9158ab544.slice/crio-9da1885ff4cb2b8feb0265b9f0a582039c0588772b1825567af8e0a42b3c97ad WatchSource:0}: Error finding container 9da1885ff4cb2b8feb0265b9f0a582039c0588772b1825567af8e0a42b3c97ad: Status 404 returned error can't find the container with id 9da1885ff4cb2b8feb0265b9f0a582039c0588772b1825567af8e0a42b3c97ad Jan 30 00:11:39 crc kubenswrapper[5113]: W0130 00:11:39.820731 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfde32e16_bb98_4b8e_9e25_fdaf5b8df655.slice/crio-45e40a4ab8a89a52ebd64a84b222e9749a3a3a150b8d2796a54d28b127829ecb WatchSource:0}: Error finding container 45e40a4ab8a89a52ebd64a84b222e9749a3a3a150b8d2796a54d28b127829ecb: Status 404 returned error can't find the container with id 45e40a4ab8a89a52ebd64a84b222e9749a3a3a150b8d2796a54d28b127829ecb Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.821912 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"mco-proxy-tls\"" Jan 30 00:11:39 crc kubenswrapper[5113]: W0130 00:11:39.822612 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod44725449_ac50_4ae0_935a_4d70c1a921f1.slice/crio-54b75eb889dc6c4190d64267bf3c079b44270878ea92a6b99e5308af4a61fdc3 WatchSource:0}: Error finding container 54b75eb889dc6c4190d64267bf3c079b44270878ea92a6b99e5308af4a61fdc3: Status 404 returned error can't find the container with id 54b75eb889dc6c4190d64267bf3c079b44270878ea92a6b99e5308af4a61fdc3 Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.823252 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-samples-operator-tls\") pod \"cluster-samples-operator-6b564684c8-44zv4\" (UID: \"c4af1b2c-aad9-48c2-b1d7-36cd069c556d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" Jan 30 00:11:39 crc kubenswrapper[5113]: W0130 00:11:39.829043 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4669eb3c_24d5_4643_91d1_de96326757fa.slice/crio-3e06f21278ec0b235bf9c7f7f6616a93aafaa0df7b7635464108d8445eae9808 WatchSource:0}: Error finding container 3e06f21278ec0b235bf9c7f7f6616a93aafaa0df7b7635464108d8445eae9808: Status 404 returned error can't find the container with id 3e06f21278ec0b235bf9c7f7f6616a93aafaa0df7b7635464108d8445eae9808 Jan 30 00:11:39 crc kubenswrapper[5113]: W0130 00:11:39.830323 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37c8062b_4496_46f0_9562_9f9d27740557.slice/crio-d21b770ecd49fa3f4ec5786146aaddb021a00069d353c1d43ea4be3d6d92f1a6 WatchSource:0}: Error finding container d21b770ecd49fa3f4ec5786146aaddb021a00069d353c1d43ea4be3d6d92f1a6: Status 404 returned error can't find the container with id d21b770ecd49fa3f4ec5786146aaddb021a00069d353c1d43ea4be3d6d92f1a6 Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.846599 5113 reflector.go:430] "Caches populated" 
type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-operator-dockercfg-sw6nc\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.848754 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.861627 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-operator-images\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.862303 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp"] Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.889271 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.893563 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"catalog-operator-serving-cert\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.900728 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-747b44746d-kxkvb" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.901327 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.917810 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.922355 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"serving-cert\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.948015 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"config\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.963428 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:39 crc kubenswrapper[5113]: I0130 00:11:39.982124 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-dockercfg-2h6bs\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.002133 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-ac-dockercfg-gj7jx\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.032101 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-admission-controller-secret\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.040859 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"service-ca-operator-config\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.061630 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.077711 5113 
kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-64d44f6ddf-6z7rp"] Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.082248 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca-operator\"/\"service-ca-operator-dockercfg-bjqfd\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.103814 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.123338 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca-operator\"/\"serving-cert\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.161722 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"mcc-proxy-tls\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.182378 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-controller-dockercfg-xnj77\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.189508 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq"] Jan 30 00:11:40 crc kubenswrapper[5113]: W0130 00:11:40.198319 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod70b23cc7_4205_4b33_a0a7_a30ee9d4ff7c.slice/crio-38fbcd27a23a578e2c77d68084211a720b35394fda6ce89bedd864b9becb17d2 WatchSource:0}: Error finding container 38fbcd27a23a578e2c77d68084211a720b35394fda6ce89bedd864b9becb17d2: Status 404 returned error can't find the container with id 38fbcd27a23a578e2c77d68084211a720b35394fda6ce89bedd864b9becb17d2 Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.202607 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.223820 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"marketplace-operator-dockercfg-2cfkp\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.224188 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4"] Jan 30 00:11:40 crc kubenswrapper[5113]: W0130 00:11:40.227129 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod11b1745f_9b78_40f8_bb20_b2c2590e4f46.slice/crio-009cfa7af8deba08a2aac6ab08dba09ee70b823e0fe9b8d58680c11240796aac WatchSource:0}: Error finding container 009cfa7af8deba08a2aac6ab08dba09ee70b823e0fe9b8d58680c11240796aac: Status 404 returned error can't find the container with id 009cfa7af8deba08a2aac6ab08dba09ee70b823e0fe9b8d58680c11240796aac Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.241917 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"marketplace-operator-metrics\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.247396 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-747b44746d-kxkvb"] Jan 30 00:11:40 crc kubenswrapper[5113]: W0130 00:11:40.253347 5113 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd6483c17_196a_4e41_8950_46d60c5505c9.slice/crio-74cc8d5084ec1bd035686c91a13f448d830c7b9277ba67341e13be954d829192 WatchSource:0}: Error finding container 74cc8d5084ec1bd035686c91a13f448d830c7b9277ba67341e13be954d829192: Status 404 returned error can't find the container with id 74cc8d5084ec1bd035686c91a13f448d830c7b9277ba67341e13be954d829192
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.267971 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"marketplace-trusted-ca\""
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.281850 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"kube-root-ca.crt\""
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.302099 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca\"/\"signing-key\""
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.321162 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca\"/\"service-ca-dockercfg-bgxvm\""
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.340788 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"signing-cabundle\""
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.361325 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"openshift-service-ca.crt\""
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.379563 5113 request.go:752] "Waited before sending request" delay="1.785752488s" reason="client-side throttling, not priority and fairness" verb="GET" URL="https://api-int.crc.testing:6443/api/v1/namespaces/openshift-service-ca/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.381891 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"kube-root-ca.crt\""
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.401553 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"dns-default\""
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.422350 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"dns-default-metrics-tls\""
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.425216 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29495520-b9gz9" event={"ID":"28e0ef1a-f823-4898-90a3-66c67c5f19eb","Type":"ContainerStarted","Data":"4858e0bc850dd7c0be02a0f21b6e24d5fc618404624f988ad4cb17ecbfbbbae8"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.426623 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" event={"ID":"ffde9a75-3edf-462f-af90-c312c4f05986","Type":"ContainerStarted","Data":"c1c012b62bfe6b7488b4f703d5f3583c4f3a6737978bb60ce88d3828c929ea9f"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.427409 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" event={"ID":"44725449-ac50-4ae0-935a-4d70c1a921f1","Type":"ContainerStarted","Data":"54b75eb889dc6c4190d64267bf3c079b44270878ea92a6b99e5308af4a61fdc3"}
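Two mechanisms show up in this stretch. The request.go:752 entry is client-go's client-side rate limiter: the kubelet's API client holds a token bucket, and when a burst of reflector LISTs drains it, subsequent requests block and the wait is logged ("client-side throttling, not priority and fairness" means the delay came from the local limiter, not the API server's APF). The "SyncLoop (PLEG)" entries that follow are the pod lifecycle event generator relaying ContainerStarted events back into the sync loop. A sketch of the throttling side using client-go's token-bucket limiter; the QPS/burst values here are the classic client-go defaults and an assumption about this kubelet's configuration, and the program needs k8s.io/client-go on the module path:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Token-bucket limiter like the one behind the kubelet's API client.
	// QPS 5 / burst 10 are assumed defaults, not values read from the log.
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)
	for i := 0; i < 12; i++ {
		start := time.Now()
		limiter.Accept() // blocks once the 10-token burst is spent
		if wait := time.Since(start); wait > time.Millisecond {
			fmt.Printf("request %d: waited %s before sending (client-side throttling)\n", i, wait)
		}
	}
}
```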
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.428236 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" event={"ID":"37c8062b-4496-46f0-9562-9f9d27740557","Type":"ContainerStarted","Data":"d21b770ecd49fa3f4ec5786146aaddb021a00069d353c1d43ea4be3d6d92f1a6"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.429199 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw" event={"ID":"0c156d1d-c9e6-43e7-b515-7a9314879127","Type":"ContainerStarted","Data":"89b5e67777b237e751c8d5d0ef0dfbc1d3ed5513992d141e780ff94a8a589429"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.430067 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" event={"ID":"2554dafb-e152-489f-a585-bfe5638c0b82","Type":"ContainerStarted","Data":"efbe518e9adaac0338289bacbb9e42a73343b0110a55d31f608f6ddb33a83816"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.431563 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp" event={"ID":"d334727d-3c8b-4f75-a3ea-a4b537fe480c","Type":"ContainerStarted","Data":"caa522a44e0730f1392f89ddd4e305bb0c8c8eeacf04566c105e32790b9bf2ba"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.432452 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" event={"ID":"11b1745f-9b78-40f8-bb20-b2c2590e4f46","Type":"ContainerStarted","Data":"009cfa7af8deba08a2aac6ab08dba09ee70b823e0fe9b8d58680c11240796aac"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.433316 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-64d44f6ddf-6z7rp" event={"ID":"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e","Type":"ContainerStarted","Data":"e7b5c56294bfc41d6466fa3a9a39036b057b18483d8c9d705567d3a906e795a5"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.434723 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" event={"ID":"41f05909-fc3b-4e9f-85e5-4df36ec3b431","Type":"ContainerStarted","Data":"6826c53754d1d79c1aea17a0b1552d6a21f4f2a5f9e2f78adfd923a9bbac26b7"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.435421 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-67c89758df-qh9jw" event={"ID":"fde32e16-bb98-4b8e-9e25-fdaf5b8df655","Type":"ContainerStarted","Data":"45e40a4ab8a89a52ebd64a84b222e9749a3a3a150b8d2796a54d28b127829ecb"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.435968 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-747b44746d-kxkvb" event={"ID":"d6483c17-196a-4e41-8950-46d60c5505c9","Type":"ContainerStarted","Data":"74cc8d5084ec1bd035686c91a13f448d830c7b9277ba67341e13be954d829192"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.436593 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8" event={"ID":"16a06343-d795-4c00-8684-13e9158ab544","Type":"ContainerStarted","Data":"9da1885ff4cb2b8feb0265b9f0a582039c0588772b1825567af8e0a42b3c97ad"}
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.437231 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq"
event={"ID":"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c","Type":"ContainerStarted","Data":"38fbcd27a23a578e2c77d68084211a720b35394fda6ce89bedd864b9becb17d2"} Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.438378 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" event={"ID":"4669eb3c-24d5-4643-91d1-de96326757fa","Type":"ContainerStarted","Data":"3e06f21278ec0b235bf9c7f7f6616a93aafaa0df7b7635464108d8445eae9808"} Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.442515 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"dns-dockercfg-kpvmz\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.461011 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-canary\"/\"default-dockercfg-9pgs7\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.481596 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"hostpath-provisioner\"/\"csi-hostpath-provisioner-sa-dockercfg-7dcws\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.500725 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"hostpath-provisioner\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.522969 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-canary\"/\"canary-serving-cert\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.543136 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"hostpath-provisioner\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.560367 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-canary\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.581368 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-canary\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.601388 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"cni-sysctl-allowlist\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.622140 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-server-dockercfg-dzw6b\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.642512 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"node-bootstrapper-token\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.663897 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-server-tls\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.680790 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-console\"/\"networking-console-plugin-cert\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.701727 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-console\"/\"networking-console-plugin\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.722319 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-diagnostics\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 
crc kubenswrapper[5113]: E0130 00:11:40.733938 5113 projected.go:289] Couldn't get configMap openshift-cluster-samples-operator/openshift-service-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:40 crc kubenswrapper[5113]: E0130 00:11:40.733996 5113 projected.go:194] Error preparing data for projected volume kube-api-access-9zkh5 for pod openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:40 crc kubenswrapper[5113]: E0130 00:11:40.734087 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-kube-api-access-9zkh5 podName:c4af1b2c-aad9-48c2-b1d7-36cd069c556d nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.234066913 +0000 UTC m=+121.306672290 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-9zkh5" (UniqueName: "kubernetes.io/projected/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-kube-api-access-9zkh5") pod "cluster-samples-operator-6b564684c8-44zv4" (UID: "c4af1b2c-aad9-48c2-b1d7-36cd069c556d") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.750109 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-diagnostics\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.761865 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-samples-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.803473 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-samples-operator\"/\"samples-operator-tls\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.817679 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-samples-operator-tls\") pod \"cluster-samples-operator-6b564684c8-44zv4\" (UID: \"c4af1b2c-aad9-48c2-b1d7-36cd069c556d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.822145 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-samples-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836003 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9g2k\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-kube-api-access-c9g2k\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836049 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/a4715539-c6a4-4c6f-9380-73fae6de4fe0-tmp-dir\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836086 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-serving-cert\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836109 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mchh\" (UniqueName: \"kubernetes.io/projected/88af8dd9-5994-4276-a701-8dc0af32b4bb-kube-api-access-2mchh\") pod \"openshift-apiserver-operator-846cbfc458-zmdjn\" (UID: \"88af8dd9-5994-4276-a701-8dc0af32b4bb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836141 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4715539-c6a4-4c6f-9380-73fae6de4fe0-serving-cert\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836182 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88af8dd9-5994-4276-a701-8dc0af32b4bb-config\") pod \"openshift-apiserver-operator-846cbfc458-zmdjn\" (UID: \"88af8dd9-5994-4276-a701-8dc0af32b4bb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836208 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-certificates\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836230 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-service-ca\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836248 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836268 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9ld2\" (UniqueName: \"kubernetes.io/projected/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-kube-api-access-n9ld2\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 
00:11:40.836282 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4715539-c6a4-4c6f-9380-73fae6de4fe0-config\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836301 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/88af8dd9-5994-4276-a701-8dc0af32b4bb-serving-cert\") pod \"openshift-apiserver-operator-846cbfc458-zmdjn\" (UID: \"88af8dd9-5994-4276-a701-8dc0af32b4bb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836333 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/de29c822-8061-4f04-9a8a-b36f6ab0082e-ca-trust-extracted\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836350 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-error\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836366 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-dir\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836386 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-trusted-ca\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836403 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836490 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-session\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc 
kubenswrapper[5113]: I0130 00:11:40.836580 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/de29c822-8061-4f04-9a8a-b36f6ab0082e-installation-pull-secrets\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836601 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-router-certs\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836622 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.836642 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a4715539-c6a4-4c6f-9380-73fae6de4fe0-kube-api-access\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837081 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-tls\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837126 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-default-certificate\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837155 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-service-ca-bundle\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837311 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " 
pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837343 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-cliconfig\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837370 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9phx\" (UniqueName: \"kubernetes.io/projected/62474d91-1e1c-48ee-b28d-bfa517692c72-kube-api-access-c9phx\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837439 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837464 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-stats-auth\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837498 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-policies\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837553 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-login\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837589 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-metrics-certs\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.837632 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-bound-sa-token\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " 
pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: E0130 00:11:40.838492 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.33847247 +0000 UTC m=+121.411077847 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.842236 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"metrics-daemon-secret\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.864256 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"metrics-daemon-sa-dockercfg-t8n29\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.938746 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:40 crc kubenswrapper[5113]: E0130 00:11:40.938938 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.438904244 +0000 UTC m=+121.511509621 (durationBeforeRetry 500ms). 
Jan 30 00:11:40 crc kubenswrapper[5113]: E0130 00:11:40.938938 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.438904244 +0000 UTC m=+121.511509621 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.939385 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0635667-b4d3-43fb-b783-841a7bf96457-config\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.939448 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbrpx\" (UniqueName: \"kubernetes.io/projected/fd060364-4a55-4af3-b560-0530d43641d0-kube-api-access-lbrpx\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: \"fd060364-4a55-4af3-b560-0530d43641d0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.939589 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d66505a1-a5af-4565-b177-ab0ea765730c-tmp-dir\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.939679 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/503cdef7-1c26-4522-a0d3-09a28bad0340-serving-cert\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.939754 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.939815 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/503cdef7-1c26-4522-a0d3-09a28bad0340-tmp\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.939864 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-secret-volume\") pod \"collect-profiles-29495520-qdtrg\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") "
pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.939904 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkwrr\" (UniqueName: \"kubernetes.io/projected/3634960c-b798-480d-b652-3ffca7e9cf70-kube-api-access-vkwrr\") pod \"migrator-866fcbc849-ng6js\" (UID: \"3634960c-b798-480d-b652-3ffca7e9cf70\") " pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.939955 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.939992 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-session\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940046 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-policies\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940083 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pk9h\" (UniqueName: \"kubernetes.io/projected/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-kube-api-access-8pk9h\") pod \"machine-config-controller-f9cdd68f7-v8gqb\" (UID: \"256a3e7e-168f-4aaf-81dd-0e4da35fcccc\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940138 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-tmp\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940201 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnvxp\" (UniqueName: \"kubernetes.io/projected/de9d205a-be35-4bef-8883-3c11fddc1c8a-kube-api-access-tnvxp\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940279 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/71f792d7-ae36-482e-87f3-fc30cfa91377-images\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " 
pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940318 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/de29c822-8061-4f04-9a8a-b36f6ab0082e-installation-pull-secrets\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940411 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1c511f97-df39-47bb-b5e9-58ed11fc3263-metrics-tls\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940472 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8kgz\" (UniqueName: \"kubernetes.io/projected/845806fc-89db-419b-9f4c-b9263fa93527-kube-api-access-s8kgz\") pod \"ingress-canary-hzktp\" (UID: \"845806fc-89db-419b-9f4c-b9263fa93527\") " pod="openshift-ingress-canary/ingress-canary-hzktp" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940546 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a4715539-c6a4-4c6f-9380-73fae6de4fe0-kube-api-access\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940627 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-default-certificate\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940707 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/71f792d7-ae36-482e-87f3-fc30cfa91377-proxy-tls\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940794 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940875 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-registration-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940899 5113 
reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b181d297-81e3-4d12-8537-1ee3a42213ce-certs\") pod \"machine-config-server-smc9m\" (UID: \"b181d297-81e3-4d12-8537-1ee3a42213ce\") " pod="openshift-machine-config-operator/machine-config-server-smc9m"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940936 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.940970 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-mountpoint-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941045 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-config-volume\") pod \"collect-profiles-29495520-qdtrg\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941068 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4c2d\" (UniqueName: \"kubernetes.io/projected/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-kube-api-access-b4c2d\") pod \"collect-profiles-29495520-qdtrg\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941088 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vds9\" (UniqueName: \"kubernetes.io/projected/48229b09-7bab-42bd-83f2-ad2944222630-kube-api-access-6vds9\") pod \"multus-admission-controller-69db94689b-zr7d6\" (UID: \"48229b09-7bab-42bd-83f2-ad2944222630\") " pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941152 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-metrics-certs\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941223 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/61eb8992-aa29-40df-bd7b-c3e937249c67-package-server-manager-serving-cert\") pod \"package-server-manager-77f986bd66-nvsp7\" (UID: \"61eb8992-aa29-40df-bd7b-c3e937249c67\") " pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941283 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/de9d205a-be35-4bef-8883-3c11fddc1c8a-webhook-cert\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941317 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b181d297-81e3-4d12-8537-1ee3a42213ce-node-bootstrap-token\") pod \"machine-config-server-smc9m\" (UID: \"b181d297-81e3-4d12-8537-1ee3a42213ce\") " pod="openshift-machine-config-operator/machine-config-server-smc9m"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941339 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-login\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941362 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-config\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941381 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hfdm\" (UniqueName: \"kubernetes.io/projected/b181d297-81e3-4d12-8537-1ee3a42213ce-kube-api-access-8hfdm\") pod \"machine-config-server-smc9m\" (UID: \"b181d297-81e3-4d12-8537-1ee3a42213ce\") " pod="openshift-machine-config-operator/machine-config-server-smc9m"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941399 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/17999f27-0d6c-46f2-82b4-a07bec4b1021-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-75ffdb6fcd-lxlzd\" (UID: \"17999f27-0d6c-46f2-82b4-a07bec4b1021\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941421 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/845806fc-89db-419b-9f4c-b9263fa93527-cert\") pod \"ingress-canary-hzktp\" (UID: \"845806fc-89db-419b-9f4c-b9263fa93527\") " pod="openshift-ingress-canary/ingress-canary-hzktp"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941507 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-serving-cert\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941571 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-csi-data-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941599 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d0635667-b4d3-43fb-b783-841a7bf96457-serving-cert\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941629 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d245ee6c-4b68-41b6-b516-38a882666394-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941654 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n7fpj\" (UniqueName: \"kubernetes.io/projected/1c511f97-df39-47bb-b5e9-58ed11fc3263-kube-api-access-n7fpj\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941705 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfhf2\" (UniqueName: \"kubernetes.io/projected/614dc955-75e1-4543-9e87-e3f4835c927d-kube-api-access-gfhf2\") pod \"service-ca-operator-5b9c976747-pgfzj\" (UID: \"614dc955-75e1-4543-9e87-e3f4835c927d\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941733 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kl7n4\" (UniqueName: \"kubernetes.io/projected/d245ee6c-4b68-41b6-b516-38a882666394-kube-api-access-kl7n4\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941758 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/1c511f97-df39-47bb-b5e9-58ed11fc3263-tmp-dir\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941783 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cba9a802-5da5-472a-89eb-8ef391936cb8-srv-cert\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp"
Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941878 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88af8dd9-5994-4276-a701-8dc0af32b4bb-config\") pod
\"openshift-apiserver-operator-846cbfc458-zmdjn\" (UID: \"88af8dd9-5994-4276-a701-8dc0af32b4bb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941906 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/614dc955-75e1-4543-9e87-e3f4835c927d-serving-cert\") pod \"service-ca-operator-5b9c976747-pgfzj\" (UID: \"614dc955-75e1-4543-9e87-e3f4835c927d\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941933 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d0635667-b4d3-43fb-b783-841a7bf96457-kube-api-access\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.941978 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fd060364-4a55-4af3-b560-0530d43641d0-profile-collector-cert\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: \"fd060364-4a55-4af3-b560-0530d43641d0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942040 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942070 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-n9ld2\" (UniqueName: \"kubernetes.io/projected/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-kube-api-access-n9ld2\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942096 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4715539-c6a4-4c6f-9380-73fae6de4fe0-config\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942122 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-service-ca\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942148 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: 
\"kubernetes.io/configmap/d245ee6c-4b68-41b6-b516-38a882666394-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942195 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sz7cq\" (UniqueName: \"kubernetes.io/projected/cba9a802-5da5-472a-89eb-8ef391936cb8-kube-api-access-sz7cq\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942230 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/de29c822-8061-4f04-9a8a-b36f6ab0082e-ca-trust-extracted\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942256 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-error\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942294 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-dir\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942424 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32a41280-9c1f-40b4-85cf-3d18b87e6d55-serving-cert\") pod \"kube-storage-version-migrator-operator-565b79b866-5vp2x\" (UID: \"32a41280-9c1f-40b4-85cf-3d18b87e6d55\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942458 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fjxh\" (UniqueName: \"kubernetes.io/projected/32a41280-9c1f-40b4-85cf-3d18b87e6d55-kube-api-access-5fjxh\") pod \"kube-storage-version-migrator-operator-565b79b866-5vp2x\" (UID: \"32a41280-9c1f-40b4-85cf-3d18b87e6d55\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942479 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-client\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942503 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-trusted-ca\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942540 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-plugins-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942644 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-dir\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942703 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-router-certs\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942767 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942864 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/fd060364-4a55-4af3-b560-0530d43641d0-srv-cert\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: \"fd060364-4a55-4af3-b560-0530d43641d0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942908 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/614dc955-75e1-4543-9e87-e3f4835c927d-config\") pod \"service-ca-operator-5b9c976747-pgfzj\" (UID: \"614dc955-75e1-4543-9e87-e3f4835c927d\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.942934 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/cba9a802-5da5-472a-89eb-8ef391936cb8-tmpfs\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943009 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cj4s\" (UniqueName: \"kubernetes.io/projected/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-kube-api-access-8cj4s\") pod \"marketplace-operator-547dbd544d-qljgk\" 
(UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943010 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/de29c822-8061-4f04-9a8a-b36f6ab0082e-ca-trust-extracted\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943049 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2mchh\" (UniqueName: \"kubernetes.io/projected/88af8dd9-5994-4276-a701-8dc0af32b4bb-kube-api-access-2mchh\") pod \"openshift-apiserver-operator-846cbfc458-zmdjn\" (UID: \"88af8dd9-5994-4276-a701-8dc0af32b4bb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943094 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8rps\" (UniqueName: \"kubernetes.io/projected/d66505a1-a5af-4565-b177-ab0ea765730c-kube-api-access-x8rps\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943134 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/503cdef7-1c26-4522-a0d3-09a28bad0340-kube-api-access\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943160 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cba9a802-5da5-472a-89eb-8ef391936cb8-profile-collector-cert\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943188 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-tls\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943217 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-service-ca-bundle\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943242 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d0635667-b4d3-43fb-b783-841a7bf96457-tmp-dir\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943265 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d66505a1-a5af-4565-b177-ab0ea765730c-serving-cert\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943341 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-proxy-tls\") pod \"machine-config-controller-f9cdd68f7-v8gqb\" (UID: \"256a3e7e-168f-4aaf-81dd-0e4da35fcccc\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943383 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-cliconfig\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943406 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-c9phx\" (UniqueName: \"kubernetes.io/projected/62474d91-1e1c-48ee-b28d-bfa517692c72-kube-api-access-c9phx\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943437 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-stats-auth\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943463 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/48229b09-7bab-42bd-83f2-ad2944222630-webhook-certs\") pod \"multus-admission-controller-69db94689b-zr7d6\" (UID: \"48229b09-7bab-42bd-83f2-ad2944222630\") " pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943500 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943556 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtrw5\" (UniqueName: \"kubernetes.io/projected/61eb8992-aa29-40df-bd7b-c3e937249c67-kube-api-access-mtrw5\") pod \"package-server-manager-77f986bd66-nvsp7\" (UID: \"61eb8992-aa29-40df-bd7b-c3e937249c67\") " 
pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943751 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-service-ca\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943795 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/de9d205a-be35-4bef-8883-3c11fddc1c8a-tmpfs\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:40 crc kubenswrapper[5113]: E0130 00:11:40.943813 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.443801897 +0000 UTC m=+121.516407504 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943851 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-trusted-ca\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943874 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/d245ee6c-4b68-41b6-b516-38a882666394-ready\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943908 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhd9x\" (UniqueName: \"kubernetes.io/projected/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-kube-api-access-dhd9x\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943947 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/fd060364-4a55-4af3-b560-0530d43641d0-tmpfs\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: \"fd060364-4a55-4af3-b560-0530d43641d0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.943974 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-ca\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944012 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-bound-sa-token\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944043 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-c9g2k\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-kube-api-access-c9g2k\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944071 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/a4715539-c6a4-4c6f-9380-73fae6de4fe0-tmp-dir\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944105 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4715539-c6a4-4c6f-9380-73fae6de4fe0-serving-cert\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944129 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/de9d205a-be35-4bef-8883-3c11fddc1c8a-apiservice-cert\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944157 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/32a41280-9c1f-40b4-85cf-3d18b87e6d55-config\") pod \"kube-storage-version-migrator-operator-565b79b866-5vp2x\" (UID: \"32a41280-9c1f-40b4-85cf-3d18b87e6d55\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944180 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/71f792d7-ae36-482e-87f3-fc30cfa91377-auth-proxy-config\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944209 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkw8c\" (UniqueName: 
\"kubernetes.io/projected/17999f27-0d6c-46f2-82b4-a07bec4b1021-kube-api-access-kkw8c\") pod \"control-plane-machine-set-operator-75ffdb6fcd-lxlzd\" (UID: \"17999f27-0d6c-46f2-82b4-a07bec4b1021\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944254 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-cabundle\") pod \"service-ca-74545575db-bv8pp\" (UID: \"f36beec6-1904-41d7-bb02-d83e22db2c5a\") " pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944326 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phnqd\" (UniqueName: \"kubernetes.io/projected/f36beec6-1904-41d7-bb02-d83e22db2c5a-kube-api-access-phnqd\") pod \"service-ca-74545575db-bv8pp\" (UID: \"f36beec6-1904-41d7-bb02-d83e22db2c5a\") " pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944367 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c511f97-df39-47bb-b5e9-58ed11fc3263-config-volume\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944460 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/503cdef7-1c26-4522-a0d3-09a28bad0340-config\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944497 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-socket-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944542 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj5qv\" (UniqueName: \"kubernetes.io/projected/71f792d7-ae36-482e-87f3-fc30cfa91377-kube-api-access-sj5qv\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944560 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-mcc-auth-proxy-config\") pod \"machine-config-controller-f9cdd68f7-v8gqb\" (UID: \"256a3e7e-168f-4aaf-81dd-0e4da35fcccc\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944584 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-certificates\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944678 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/a4715539-c6a4-4c6f-9380-73fae6de4fe0-tmp-dir\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944712 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/88af8dd9-5994-4276-a701-8dc0af32b4bb-serving-cert\") pod \"openshift-apiserver-operator-846cbfc458-zmdjn\" (UID: \"88af8dd9-5994-4276-a701-8dc0af32b4bb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.944808 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-key\") pod \"service-ca-74545575db-bv8pp\" (UID: \"f36beec6-1904-41d7-bb02-d83e22db2c5a\") " pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.945891 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-certificates\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.969323 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-trusted-ca-bundle\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.972037 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.981975 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-session\"" Jan 30 00:11:40 crc kubenswrapper[5113]: I0130 00:11:40.995406 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-session\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.001096 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"audit\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.014502 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-policies\" 
(UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-policies\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.021269 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"installation-pull-secrets\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.036589 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/de29c822-8061-4f04-9a8a-b36f6ab0082e-installation-pull-secrets\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.046037 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.046191 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.5461656 +0000 UTC m=+121.618770977 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.046677 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/71f792d7-ae36-482e-87f3-fc30cfa91377-proxy-tls\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.046755 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.046832 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-registration-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.046981 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: 
\"kubernetes.io/secret/b181d297-81e3-4d12-8537-1ee3a42213ce-certs\") pod \"machine-config-server-smc9m\" (UID: \"b181d297-81e3-4d12-8537-1ee3a42213ce\") " pod="openshift-machine-config-operator/machine-config-server-smc9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047053 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-mountpoint-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047104 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-config-volume\") pod \"collect-profiles-29495520-qdtrg\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047289 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-b4c2d\" (UniqueName: \"kubernetes.io/projected/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-kube-api-access-b4c2d\") pod \"collect-profiles-29495520-qdtrg\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047364 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-6vds9\" (UniqueName: \"kubernetes.io/projected/48229b09-7bab-42bd-83f2-ad2944222630-kube-api-access-6vds9\") pod \"multus-admission-controller-69db94689b-zr7d6\" (UID: \"48229b09-7bab-42bd-83f2-ad2944222630\") " pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047411 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-mountpoint-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047451 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-registration-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047418 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/61eb8992-aa29-40df-bd7b-c3e937249c67-package-server-manager-serving-cert\") pod \"package-server-manager-77f986bd66-nvsp7\" (UID: \"61eb8992-aa29-40df-bd7b-c3e937249c67\") " pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047597 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/de9d205a-be35-4bef-8883-3c11fddc1c8a-webhook-cert\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " 
pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047632 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b181d297-81e3-4d12-8537-1ee3a42213ce-node-bootstrap-token\") pod \"machine-config-server-smc9m\" (UID: \"b181d297-81e3-4d12-8537-1ee3a42213ce\") " pod="openshift-machine-config-operator/machine-config-server-smc9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047668 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-config\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047694 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-8hfdm\" (UniqueName: \"kubernetes.io/projected/b181d297-81e3-4d12-8537-1ee3a42213ce-kube-api-access-8hfdm\") pod \"machine-config-server-smc9m\" (UID: \"b181d297-81e3-4d12-8537-1ee3a42213ce\") " pod="openshift-machine-config-operator/machine-config-server-smc9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047722 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/17999f27-0d6c-46f2-82b4-a07bec4b1021-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-75ffdb6fcd-lxlzd\" (UID: \"17999f27-0d6c-46f2-82b4-a07bec4b1021\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047754 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/845806fc-89db-419b-9f4c-b9263fa93527-cert\") pod \"ingress-canary-hzktp\" (UID: \"845806fc-89db-419b-9f4c-b9263fa93527\") " pod="openshift-ingress-canary/ingress-canary-hzktp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047793 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-csi-data-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.047815 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d0635667-b4d3-43fb-b783-841a7bf96457-serving-cert\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048001 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d245ee6c-4b68-41b6-b516-38a882666394-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048069 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for 
volume \"kube-api-access-n7fpj\" (UniqueName: \"kubernetes.io/projected/1c511f97-df39-47bb-b5e9-58ed11fc3263-kube-api-access-n7fpj\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048007 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-csi-data-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048105 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d245ee6c-4b68-41b6-b516-38a882666394-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048115 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gfhf2\" (UniqueName: \"kubernetes.io/projected/614dc955-75e1-4543-9e87-e3f4835c927d-kube-api-access-gfhf2\") pod \"service-ca-operator-5b9c976747-pgfzj\" (UID: \"614dc955-75e1-4543-9e87-e3f4835c927d\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048157 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-kl7n4\" (UniqueName: \"kubernetes.io/projected/d245ee6c-4b68-41b6-b516-38a882666394-kube-api-access-kl7n4\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048195 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/1c511f97-df39-47bb-b5e9-58ed11fc3263-tmp-dir\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048235 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cba9a802-5da5-472a-89eb-8ef391936cb8-srv-cert\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048291 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/614dc955-75e1-4543-9e87-e3f4835c927d-serving-cert\") pod \"service-ca-operator-5b9c976747-pgfzj\" (UID: \"614dc955-75e1-4543-9e87-e3f4835c927d\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048337 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d0635667-b4d3-43fb-b783-841a7bf96457-kube-api-access\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:41 crc 
kubenswrapper[5113]: I0130 00:11:41.048380 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fd060364-4a55-4af3-b560-0530d43641d0-profile-collector-cert\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: \"fd060364-4a55-4af3-b560-0530d43641d0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048457 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/d245ee6c-4b68-41b6-b516-38a882666394-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048577 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-sz7cq\" (UniqueName: \"kubernetes.io/projected/cba9a802-5da5-472a-89eb-8ef391936cb8-kube-api-access-sz7cq\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048645 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32a41280-9c1f-40b4-85cf-3d18b87e6d55-serving-cert\") pod \"kube-storage-version-migrator-operator-565b79b866-5vp2x\" (UID: \"32a41280-9c1f-40b4-85cf-3d18b87e6d55\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048586 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/1c511f97-df39-47bb-b5e9-58ed11fc3263-tmp-dir\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048733 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-5fjxh\" (UniqueName: \"kubernetes.io/projected/32a41280-9c1f-40b4-85cf-3d18b87e6d55-kube-api-access-5fjxh\") pod \"kube-storage-version-migrator-operator-565b79b866-5vp2x\" (UID: \"32a41280-9c1f-40b4-85cf-3d18b87e6d55\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048779 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-client\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048818 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-plugins-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048895 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/fd060364-4a55-4af3-b560-0530d43641d0-srv-cert\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: \"fd060364-4a55-4af3-b560-0530d43641d0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048943 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-plugins-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048948 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/614dc955-75e1-4543-9e87-e3f4835c927d-config\") pod \"service-ca-operator-5b9c976747-pgfzj\" (UID: \"614dc955-75e1-4543-9e87-e3f4835c927d\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.048998 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/cba9a802-5da5-472a-89eb-8ef391936cb8-tmpfs\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049037 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-8cj4s\" (UniqueName: \"kubernetes.io/projected/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-kube-api-access-8cj4s\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049072 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-x8rps\" (UniqueName: \"kubernetes.io/projected/d66505a1-a5af-4565-b177-ab0ea765730c-kube-api-access-x8rps\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049098 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/503cdef7-1c26-4522-a0d3-09a28bad0340-kube-api-access\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049122 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cba9a802-5da5-472a-89eb-8ef391936cb8-profile-collector-cert\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049153 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d0635667-b4d3-43fb-b783-841a7bf96457-tmp-dir\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049177 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d66505a1-a5af-4565-b177-ab0ea765730c-serving-cert\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049219 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-proxy-tls\") pod \"machine-config-controller-f9cdd68f7-v8gqb\" (UID: \"256a3e7e-168f-4aaf-81dd-0e4da35fcccc\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049259 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/48229b09-7bab-42bd-83f2-ad2944222630-webhook-certs\") pod \"multus-admission-controller-69db94689b-zr7d6\" (UID: \"48229b09-7bab-42bd-83f2-ad2944222630\") " pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049301 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049333 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-mtrw5\" (UniqueName: \"kubernetes.io/projected/61eb8992-aa29-40df-bd7b-c3e937249c67-kube-api-access-mtrw5\") pod \"package-server-manager-77f986bd66-nvsp7\" (UID: \"61eb8992-aa29-40df-bd7b-c3e937249c67\") " pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049353 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-service-ca\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049377 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/de9d205a-be35-4bef-8883-3c11fddc1c8a-tmpfs\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049410 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/d245ee6c-4b68-41b6-b516-38a882666394-ready\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049439 5113 reconciler_common.go:224] 
"operationExecutor.MountVolume started for volume \"kube-api-access-dhd9x\" (UniqueName: \"kubernetes.io/projected/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-kube-api-access-dhd9x\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049458 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/fd060364-4a55-4af3-b560-0530d43641d0-tmpfs\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: \"fd060364-4a55-4af3-b560-0530d43641d0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049477 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-ca\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049551 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/de9d205a-be35-4bef-8883-3c11fddc1c8a-apiservice-cert\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049588 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/32a41280-9c1f-40b4-85cf-3d18b87e6d55-config\") pod \"kube-storage-version-migrator-operator-565b79b866-5vp2x\" (UID: \"32a41280-9c1f-40b4-85cf-3d18b87e6d55\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049623 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/71f792d7-ae36-482e-87f3-fc30cfa91377-auth-proxy-config\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049653 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-kkw8c\" (UniqueName: \"kubernetes.io/projected/17999f27-0d6c-46f2-82b4-a07bec4b1021-kube-api-access-kkw8c\") pod \"control-plane-machine-set-operator-75ffdb6fcd-lxlzd\" (UID: \"17999f27-0d6c-46f2-82b4-a07bec4b1021\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049692 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-cabundle\") pod \"service-ca-74545575db-bv8pp\" (UID: \"f36beec6-1904-41d7-bb02-d83e22db2c5a\") " pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049721 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-phnqd\" (UniqueName: 
\"kubernetes.io/projected/f36beec6-1904-41d7-bb02-d83e22db2c5a-kube-api-access-phnqd\") pod \"service-ca-74545575db-bv8pp\" (UID: \"f36beec6-1904-41d7-bb02-d83e22db2c5a\") " pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049748 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c511f97-df39-47bb-b5e9-58ed11fc3263-config-volume\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049776 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/503cdef7-1c26-4522-a0d3-09a28bad0340-config\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049819 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-socket-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049849 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-sj5qv\" (UniqueName: \"kubernetes.io/projected/71f792d7-ae36-482e-87f3-fc30cfa91377-kube-api-access-sj5qv\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049876 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-mcc-auth-proxy-config\") pod \"machine-config-controller-f9cdd68f7-v8gqb\" (UID: \"256a3e7e-168f-4aaf-81dd-0e4da35fcccc\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049936 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-key\") pod \"service-ca-74545575db-bv8pp\" (UID: \"f36beec6-1904-41d7-bb02-d83e22db2c5a\") " pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049965 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0635667-b4d3-43fb-b783-841a7bf96457-config\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.049993 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-lbrpx\" (UniqueName: \"kubernetes.io/projected/fd060364-4a55-4af3-b560-0530d43641d0-kube-api-access-lbrpx\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: \"fd060364-4a55-4af3-b560-0530d43641d0\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050027 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d66505a1-a5af-4565-b177-ab0ea765730c-tmp-dir\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050064 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/503cdef7-1c26-4522-a0d3-09a28bad0340-serving-cert\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050067 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d0635667-b4d3-43fb-b783-841a7bf96457-tmp-dir\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050105 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/503cdef7-1c26-4522-a0d3-09a28bad0340-tmp\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050234 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-secret-volume\") pod \"collect-profiles-29495520-qdtrg\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050292 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-vkwrr\" (UniqueName: \"kubernetes.io/projected/3634960c-b798-480d-b652-3ffca7e9cf70-kube-api-access-vkwrr\") pod \"migrator-866fcbc849-ng6js\" (UID: \"3634960c-b798-480d-b652-3ffca7e9cf70\") " pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050328 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050353 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/d245ee6c-4b68-41b6-b516-38a882666394-ready\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050381 5113 reconciler_common.go:224] 
"operationExecutor.MountVolume started for volume \"kube-api-access-8pk9h\" (UniqueName: \"kubernetes.io/projected/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-kube-api-access-8pk9h\") pod \"machine-config-controller-f9cdd68f7-v8gqb\" (UID: \"256a3e7e-168f-4aaf-81dd-0e4da35fcccc\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050393 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/fd060364-4a55-4af3-b560-0530d43641d0-tmpfs\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: \"fd060364-4a55-4af3-b560-0530d43641d0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050438 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-tmp\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050502 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-socket-dir\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050507 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/503cdef7-1c26-4522-a0d3-09a28bad0340-tmp\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050650 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b181d297-81e3-4d12-8537-1ee3a42213ce-node-bootstrap-token\") pod \"machine-config-server-smc9m\" (UID: \"b181d297-81e3-4d12-8537-1ee3a42213ce\") " pod="openshift-machine-config-operator/machine-config-server-smc9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050510 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-tnvxp\" (UniqueName: \"kubernetes.io/projected/de9d205a-be35-4bef-8883-3c11fddc1c8a-kube-api-access-tnvxp\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050740 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/71f792d7-ae36-482e-87f3-fc30cfa91377-images\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050809 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1c511f97-df39-47bb-b5e9-58ed11fc3263-metrics-tls\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " 
pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050832 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-s8kgz\" (UniqueName: \"kubernetes.io/projected/845806fc-89db-419b-9f4c-b9263fa93527-kube-api-access-s8kgz\") pod \"ingress-canary-hzktp\" (UID: \"845806fc-89db-419b-9f4c-b9263fa93527\") " pod="openshift-ingress-canary/ingress-canary-hzktp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.050976 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/d66505a1-a5af-4565-b177-ab0ea765730c-tmp-dir\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.051275 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.551237988 +0000 UTC m=+121.623843575 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.051294 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/de9d205a-be35-4bef-8883-3c11fddc1c8a-tmpfs\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.051429 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/cba9a802-5da5-472a-89eb-8ef391936cb8-tmpfs\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.052846 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-tmp\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.056703 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b181d297-81e3-4d12-8537-1ee3a42213ce-certs\") pod \"machine-config-server-smc9m\" (UID: \"b181d297-81e3-4d12-8537-1ee3a42213ce\") " pod="openshift-machine-config-operator/machine-config-server-smc9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.060984 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-certs-default\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.075356 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"default-certificate\" 
(UniqueName: \"kubernetes.io/secret/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-default-certificate\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.081640 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-idp-0-file-data\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.089403 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-mcc-auth-proxy-config\") pod \"machine-config-controller-f9cdd68f7-v8gqb\" (UID: \"256a3e7e-168f-4aaf-81dd-0e4da35fcccc\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.100818 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/71f792d7-ae36-482e-87f3-fc30cfa91377-auth-proxy-config\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.101884 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.102027 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-metrics-certs-default\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.116259 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-metrics-certs\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.121724 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-login\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.136966 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-login\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.147124 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-serving-cert\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.151825 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: 
\"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.152094 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.652064424 +0000 UTC m=+121.724669801 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.152253 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.152874 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.652849718 +0000 UTC m=+121.725455105 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.156651 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-serving-cert\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.162686 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-config\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.173563 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/88af8dd9-5994-4276-a701-8dc0af32b4bb-config\") pod \"openshift-apiserver-operator-846cbfc458-zmdjn\" (UID: \"88af8dd9-5994-4276-a701-8dc0af32b4bb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.192807 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-ocp-branding-template\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.201209 5113 reflector.go:430] "Caches populated" 
type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-service-ca\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.203827 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-service-ca\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.210335 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.221255 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-config\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.225610 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a4715539-c6a4-4c6f-9380-73fae6de4fe0-config\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.242121 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-error\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.246632 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-error\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.254598 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.254820 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.754774218 +0000 UTC m=+121.827379595 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.255105 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-9zkh5\" (UniqueName: \"kubernetes.io/projected/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-kube-api-access-9zkh5\") pod \"cluster-samples-operator-6b564684c8-44zv4\" (UID: \"c4af1b2c-aad9-48c2-b1d7-36cd069c556d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.255190 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.255649 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.755628795 +0000 UTC m=+121.828234172 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.262402 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-9zkh5\" (UniqueName: \"kubernetes.io/projected/c4af1b2c-aad9-48c2-b1d7-36cd069c556d-kube-api-access-9zkh5\") pod \"cluster-samples-operator-6b564684c8-44zv4\" (UID: \"c4af1b2c-aad9-48c2-b1d7-36cd069c556d\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.281390 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-router-certs\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.286191 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-router-certs\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.302830 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-provider-selection\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.308768 5113 
operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.356920 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.357115 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.857062899 +0000 UTC m=+121.929668296 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.358261 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.358696 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.858679119 +0000 UTC m=+121.931284496 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.361957 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"service-ca-bundle\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.366066 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-service-ca-bundle\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.381844 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"image-registry-tls\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.391221 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-tls\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.421586 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-cliconfig\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.427309 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-cliconfig\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.443767 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-stats-default\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.453229 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-stats-auth\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.459436 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.459964 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:41.959909108 +0000 UTC m=+122.032514485 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.460298 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.460858 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:41.960847887 +0000 UTC m=+122.033453264 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.466745 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p" event={"ID":"c6737a55-918a-4c58-ac84-4e1f78ddff5e","Type":"ContainerStarted","Data":"c83384640f10ea505b7e6ba0dad736e86a5f64482878ce5196969a4dfc7905c7"} Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.481815 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-serving-cert\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.483175 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-bound-sa-token\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.487386 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a4715539-c6a4-4c6f-9380-73fae6de4fe0-serving-cert\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.491950 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" 
event={"ID":"2554dafb-e152-489f-a585-bfe5638c0b82","Type":"ContainerStarted","Data":"57aa6a781bf35ab69657d56f03a38ddb4163d7b8d223f2661c4157f9d9ed9a4a"} Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.517147 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9g2k\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-kube-api-access-c9g2k\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.521771 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-serving-cert\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.530850 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/88af8dd9-5994-4276-a701-8dc0af32b4bb-serving-cert\") pod \"openshift-apiserver-operator-846cbfc458-zmdjn\" (UID: \"88af8dd9-5994-4276-a701-8dc0af32b4bb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.545834 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"mco-proxy-tls\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.554318 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/71f792d7-ae36-482e-87f3-fc30cfa91377-proxy-tls\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.562049 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"marketplace-operator-metrics\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.569006 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.569260 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.069232158 +0000 UTC m=+122.141837535 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.570010 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.570384 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.070363093 +0000 UTC m=+122.142968470 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.580291 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-config\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.584142 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.591459 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-config-volume\") pod \"collect-profiles-29495520-qdtrg\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.639162 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vds9\" (UniqueName: \"kubernetes.io/projected/48229b09-7bab-42bd-83f2-ad2944222630-kube-api-access-6vds9\") pod \"multus-admission-controller-69db94689b-zr7d6\" (UID: \"48229b09-7bab-42bd-83f2-ad2944222630\") " pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.641972 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"package-server-manager-serving-cert\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.652357 5113 operation_generator.go:615] "MountVolume.SetUp 
succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/61eb8992-aa29-40df-bd7b-c3e937249c67-package-server-manager-serving-cert\") pod \"package-server-manager-77f986bd66-nvsp7\" (UID: \"61eb8992-aa29-40df-bd7b-c3e937249c67\") " pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.662054 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"packageserver-service-cert\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.670781 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.671005 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.170979192 +0000 UTC m=+122.243584569 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.671278 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.671791 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.171779188 +0000 UTC m=+122.244384565 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.676040 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/de9d205a-be35-4bef-8883-3c11fddc1c8a-webhook-cert\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.676540 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/de9d205a-be35-4bef-8883-3c11fddc1c8a-apiservice-cert\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.697750 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hfdm\" (UniqueName: \"kubernetes.io/projected/b181d297-81e3-4d12-8537-1ee3a42213ce-kube-api-access-8hfdm\") pod \"machine-config-server-smc9m\" (UID: \"b181d297-81e3-4d12-8537-1ee3a42213ce\") " pod="openshift-machine-config-operator/machine-config-server-smc9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.701864 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-config\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.709854 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-config\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.721655 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"control-plane-machine-set-operator-tls\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.734712 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/17999f27-0d6c-46f2-82b4-a07bec4b1021-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-75ffdb6fcd-lxlzd\" (UID: \"17999f27-0d6c-46f2-82b4-a07bec4b1021\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.742165 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-serving-cert\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.753087 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d0635667-b4d3-43fb-b783-841a7bf96457-serving-cert\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.761677 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-canary\"/\"canary-serving-cert\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.771799 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/845806fc-89db-419b-9f4c-b9263fa93527-cert\") pod \"ingress-canary-hzktp\" (UID: \"845806fc-89db-419b-9f4c-b9263fa93527\") " pod="openshift-ingress-canary/ingress-canary-hzktp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.772612 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.773298 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.273274583 +0000 UTC m=+122.345879960 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.800900 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-n7fpj\" (UniqueName: \"kubernetes.io/projected/1c511f97-df39-47bb-b5e9-58ed11fc3263-kube-api-access-n7fpj\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.840802 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-kl7n4\" (UniqueName: \"kubernetes.io/projected/d245ee6c-4b68-41b6-b516-38a882666394-kube-api-access-kl7n4\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.842099 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca-operator\"/\"serving-cert\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.853935 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/614dc955-75e1-4543-9e87-e3f4835c927d-serving-cert\") pod \"service-ca-operator-5b9c976747-pgfzj\" (UID: \"614dc955-75e1-4543-9e87-e3f4835c927d\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.861471 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"olm-operator-serving-cert\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.876122 
5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.876706 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.37668501 +0000 UTC m=+122.449290447 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.879094 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cba9a802-5da5-472a-89eb-8ef391936cb8-srv-cert\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.901810 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"cni-sysctl-allowlist\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.909885 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/d245ee6c-4b68-41b6-b516-38a882666394-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-b5d9m\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") " pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.921143 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"pprof-cert\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.935066 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-secret-volume\") pod \"collect-profiles-29495520-qdtrg\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.936198 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cba9a802-5da5-472a-89eb-8ef391936cb8-profile-collector-cert\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.936647 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/fd060364-4a55-4af3-b560-0530d43641d0-profile-collector-cert\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: 
\"fd060364-4a55-4af3-b560-0530d43641d0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.983845 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.984093 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-client\"" Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.989086 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.989374 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.489352754 +0000 UTC m=+122.561958131 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.990086 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:41 crc kubenswrapper[5113]: E0130 00:11:41.990417 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.490404077 +0000 UTC m=+122.563009474 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:41 crc kubenswrapper[5113]: I0130 00:11:41.995209 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-client\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.001260 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"catalog-operator-serving-cert\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.022641 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"service-ca-operator-config\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.031040 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/614dc955-75e1-4543-9e87-e3f4835c927d-config\") pod \"service-ca-operator-5b9c976747-pgfzj\" (UID: \"614dc955-75e1-4543-9e87-e3f4835c927d\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.041377 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"serving-cert\"" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.049637 5113 secret.go:189] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.049748 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d66505a1-a5af-4565-b177-ab0ea765730c-serving-cert podName:d66505a1-a5af-4565-b177-ab0ea765730c nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.549720852 +0000 UTC m=+122.622326219 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d66505a1-a5af-4565-b177-ab0ea765730c-serving-cert") pod "etcd-operator-69b85846b6-t984r" (UID: "d66505a1-a5af-4565-b177-ab0ea765730c") : failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.049909 5113 configmap.go:193] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050044 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-ca podName:d66505a1-a5af-4565-b177-ab0ea765730c nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.5500099 +0000 UTC m=+122.622615277 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-ca") pod "etcd-operator-69b85846b6-t984r" (UID: "d66505a1-a5af-4565-b177-ab0ea765730c") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050550 5113 configmap.go:193] Couldn't get configMap openshift-dns/dns-default: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050597 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1c511f97-df39-47bb-b5e9-58ed11fc3263-config-volume podName:1c511f97-df39-47bb-b5e9-58ed11fc3263 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.550585578 +0000 UTC m=+122.623190955 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/1c511f97-df39-47bb-b5e9-58ed11fc3263-config-volume") pod "dns-default-9wphq" (UID: "1c511f97-df39-47bb-b5e9-58ed11fc3263") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050628 5113 configmap.go:193] Couldn't get configMap openshift-marketplace/marketplace-trusted-ca: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050625 5113 secret.go:189] Couldn't get secret openshift-service-ca/signing-key: failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050692 5113 configmap.go:193] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050734 5113 configmap.go:193] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050776 5113 secret.go:189] Couldn't get secret openshift-multus/multus-admission-controller-secret: failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050807 5113 configmap.go:193] Couldn't get configMap openshift-kube-controller-manager-operator/kube-controller-manager-operator-config: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050665 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-trusted-ca podName:f7e91b03-5282-4f6e-8ed2-a44afa3fc350 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.55065538 +0000 UTC m=+122.623260997 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "marketplace-trusted-ca" (UniqueName: "kubernetes.io/configmap/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-trusted-ca") pod "marketplace-operator-547dbd544d-qljgk" (UID: "f7e91b03-5282-4f6e-8ed2-a44afa3fc350") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050853 5113 configmap.go:193] Couldn't get configMap openshift-service-ca/signing-cabundle: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050888 5113 secret.go:189] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050890 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/32a41280-9c1f-40b4-85cf-3d18b87e6d55-config podName:32a41280-9c1f-40b4-85cf-3d18b87e6d55 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.550844766 +0000 UTC m=+122.623450173 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/32a41280-9c1f-40b4-85cf-3d18b87e6d55-config") pod "kube-storage-version-migrator-operator-565b79b866-5vp2x" (UID: "32a41280-9c1f-40b4-85cf-3d18b87e6d55") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050925 5113 configmap.go:193] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050924 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/503cdef7-1c26-4522-a0d3-09a28bad0340-config podName:503cdef7-1c26-4522-a0d3-09a28bad0340 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.550909288 +0000 UTC m=+122.623514695 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/503cdef7-1c26-4522-a0d3-09a28bad0340-config") pod "openshift-kube-scheduler-operator-54f497555d-nm82k" (UID: "503cdef7-1c26-4522-a0d3-09a28bad0340") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050955 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-key podName:f36beec6-1904-41d7-bb02-d83e22db2c5a nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.550944069 +0000 UTC m=+122.623549486 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-key") pod "service-ca-74545575db-bv8pp" (UID: "f36beec6-1904-41d7-bb02-d83e22db2c5a") : failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050964 5113 secret.go:189] Couldn't get secret openshift-dns/dns-default-metrics-tls: failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050977 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/48229b09-7bab-42bd-83f2-ad2944222630-webhook-certs podName:48229b09-7bab-42bd-83f2-ad2944222630 nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:42.55096651 +0000 UTC m=+122.623571927 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/48229b09-7bab-42bd-83f2-ad2944222630-webhook-certs") pod "multus-admission-controller-69db94689b-zr7d6" (UID: "48229b09-7bab-42bd-83f2-ad2944222630") : failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.050999 5113 configmap.go:193] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.051001 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d0635667-b4d3-43fb-b783-841a7bf96457-config podName:d0635667-b4d3-43fb-b783-841a7bf96457 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.550989791 +0000 UTC m=+122.623595198 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/d0635667-b4d3-43fb-b783-841a7bf96457-config") pod "kube-controller-manager-operator-69d5f845f8-pnrbz" (UID: "d0635667-b4d3-43fb-b783-841a7bf96457") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.051023 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-service-ca podName:d66505a1-a5af-4565-b177-ab0ea765730c nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.551015342 +0000 UTC m=+122.623620719 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-service-ca") pod "etcd-operator-69b85846b6-t984r" (UID: "d66505a1-a5af-4565-b177-ab0ea765730c") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.051042 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-cabundle podName:f36beec6-1904-41d7-bb02-d83e22db2c5a nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.551035412 +0000 UTC m=+122.623640779 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-cabundle") pod "service-ca-74545575db-bv8pp" (UID: "f36beec6-1904-41d7-bb02-d83e22db2c5a") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.051056 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/503cdef7-1c26-4522-a0d3-09a28bad0340-serving-cert podName:503cdef7-1c26-4522-a0d3-09a28bad0340 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.551049803 +0000 UTC m=+122.623655180 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/503cdef7-1c26-4522-a0d3-09a28bad0340-serving-cert") pod "openshift-kube-scheduler-operator-54f497555d-nm82k" (UID: "503cdef7-1c26-4522-a0d3-09a28bad0340") : failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.051071 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/71f792d7-ae36-482e-87f3-fc30cfa91377-images podName:71f792d7-ae36-482e-87f3-fc30cfa91377 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.551063743 +0000 UTC m=+122.623669370 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/71f792d7-ae36-482e-87f3-fc30cfa91377-images") pod "machine-config-operator-67c9d58cbb-m9c4q" (UID: "71f792d7-ae36-482e-87f3-fc30cfa91377") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.051087 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1c511f97-df39-47bb-b5e9-58ed11fc3263-metrics-tls podName:1c511f97-df39-47bb-b5e9-58ed11fc3263 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.551079874 +0000 UTC m=+122.623685491 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/1c511f97-df39-47bb-b5e9-58ed11fc3263-metrics-tls") pod "dns-default-9wphq" (UID: "1c511f97-df39-47bb-b5e9-58ed11fc3263") : failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.051489 5113 secret.go:189] Couldn't get secret openshift-machine-config-operator/mcc-proxy-tls: failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.051628 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-proxy-tls podName:256a3e7e-168f-4aaf-81dd-0e4da35fcccc nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.55159996 +0000 UTC m=+122.624205517 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-proxy-tls") pod "machine-config-controller-f9cdd68f7-v8gqb" (UID: "256a3e7e-168f-4aaf-81dd-0e4da35fcccc") : failed to sync secret cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.053835 5113 projected.go:289] Couldn't get configMap openshift-kube-apiserver-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.053886 5113 projected.go:194] Error preparing data for projected volume kube-api-access for pod openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.053963 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/a4715539-c6a4-4c6f-9380-73fae6de4fe0-kube-api-access podName:a4715539-c6a4-4c6f-9380-73fae6de4fe0 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.553945283 +0000 UTC m=+122.626550690 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access" (UniqueName: "kubernetes.io/projected/a4715539-c6a4-4c6f-9380-73fae6de4fe0-kube-api-access") pod "kube-apiserver-operator-575994946d-pstq8" (UID: "a4715539-c6a4-4c6f-9380-73fae6de4fe0") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.058245 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32a41280-9c1f-40b4-85cf-3d18b87e6d55-serving-cert\") pod \"kube-storage-version-migrator-operator-565b79b866-5vp2x\" (UID: \"32a41280-9c1f-40b4-85cf-3d18b87e6d55\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.059664 5113 request.go:752] "Waited before sending request" delay="1.010131206s" reason="client-side throttling, not priority and fairness" verb="POST" URL="https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/serviceaccounts/openshift-kube-scheduler-operator/token" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.081044 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-serving-cert\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.091158 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.091372 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.591319566 +0000 UTC m=+122.663924973 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.091963 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.092580 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.592549463 +0000 UTC m=+122.665155100 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.098049 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/fd060364-4a55-4af3-b560-0530d43641d0-srv-cert\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: \"fd060364-4a55-4af3-b560-0530d43641d0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:42 crc kubenswrapper[5113]: W0130 00:11:42.100868 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd245ee6c_4b68_41b6_b516_38a882666394.slice/crio-c49aca45d0e6e27d209612c675dc46485683c2b3053a6b6fb0e20dd090ffce1e WatchSource:0}: Error finding container c49aca45d0e6e27d209612c675dc46485683c2b3053a6b6fb0e20dd090ffce1e: Status 404 returned error can't find the container with id c49aca45d0e6e27d209612c675dc46485683c2b3053a6b6fb0e20dd090ffce1e Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.140552 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"dns-default\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.161314 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-ca-bundle\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.181497 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"config\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.193541 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.193953 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.693894785 +0000 UTC m=+122.766500332 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.194273 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.195145 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.695118504 +0000 UTC m=+122.767723911 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.228156 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkw8c\" (UniqueName: \"kubernetes.io/projected/17999f27-0d6c-46f2-82b4-a07bec4b1021-kube-api-access-kkw8c\") pod \"control-plane-machine-set-operator-75ffdb6fcd-lxlzd\" (UID: \"17999f27-0d6c-46f2-82b4-a07bec4b1021\") " pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.271999 5113 projected.go:289] Couldn't get configMap openshift-ingress/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.273004 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"marketplace-trusted-ca\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.296229 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.296601 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.796542068 +0000 UTC m=+122.869147455 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.297346 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.297978 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.797965312 +0000 UTC m=+122.870570699 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.299119 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pk9h\" (UniqueName: \"kubernetes.io/projected/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-kube-api-access-8pk9h\") pod \"machine-config-controller-f9cdd68f7-v8gqb\" (UID: \"256a3e7e-168f-4aaf-81dd-0e4da35fcccc\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.321638 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca\"/\"signing-key\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.343052 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-config\"" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.352952 5113 projected.go:289] Couldn't get configMap openshift-apiserver-operator/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.363259 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-config\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.383936 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-admission-controller-secret\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.386119 5113 kubelet_pods.go:1019] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" secret="" err="failed to sync secret cache: timed out waiting for the condition" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.386250 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.399276 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.399642 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.899585213 +0000 UTC m=+122.972190610 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.400231 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.400768 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:42.900744929 +0000 UTC m=+122.973350316 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.402817 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler-operator\"/\"kube-scheduler-operator-serving-cert\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.422145 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"dns-default-metrics-tls\"" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.428824 5113 projected.go:289] Couldn't get configMap openshift-authentication/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.445596 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-operator-images\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.504161 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.505121 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.005091054 +0000 UTC m=+123.077696431 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.505493 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.505909 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.005896759 +0000 UTC m=+123.078502146 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.507849 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p" event={"ID":"c6737a55-918a-4c58-ac84-4e1f78ddff5e","Type":"ContainerStarted","Data":"346b97bbf77c7177497e89a90ae1af749bea7a015f09a3340a23ce0efea684cf"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.512052 5113 generic.go:358] "Generic (PLEG): container finished" podID="44725449-ac50-4ae0-935a-4d70c1a921f1" containerID="45f5f36b749f9e0862d5c5ed1b06adb8ff51d092f61abbbd88f895ce2d95ae3d" exitCode=0 Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.512175 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" event={"ID":"44725449-ac50-4ae0-935a-4d70c1a921f1","Type":"ContainerDied","Data":"45f5f36b749f9e0862d5c5ed1b06adb8ff51d092f61abbbd88f895ce2d95ae3d"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.521270 5113 generic.go:358] "Generic (PLEG): container finished" podID="37c8062b-4496-46f0-9562-9f9d27740557" containerID="4439ea8c4fa329eafc237577b5f10a7dbd905becc5743618ce57bef4fa917a1c" exitCode=0 Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.521515 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" event={"ID":"37c8062b-4496-46f0-9562-9f9d27740557","Type":"ContainerDied","Data":"4439ea8c4fa329eafc237577b5f10a7dbd905becc5743618ce57bef4fa917a1c"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.529442 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw" event={"ID":"0c156d1d-c9e6-43e7-b515-7a9314879127","Type":"ContainerStarted","Data":"6f65d6b37765e60c178a179dd9ad88253d5407b8a9163c57b8feee9402cbbb66"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.529490 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw" event={"ID":"0c156d1d-c9e6-43e7-b515-7a9314879127","Type":"ContainerStarted","Data":"bf8feab964419c41f54a2724e8e3770c85ed082f085111c3e7111ca812fba165"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.533448 5113 generic.go:358] "Generic (PLEG): container finished" podID="2554dafb-e152-489f-a585-bfe5638c0b82" containerID="57aa6a781bf35ab69657d56f03a38ddb4163d7b8d223f2661c4157f9d9ed9a4a" exitCode=0 Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.533575 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" event={"ID":"2554dafb-e152-489f-a585-bfe5638c0b82","Type":"ContainerDied","Data":"57aa6a781bf35ab69657d56f03a38ddb4163d7b8d223f2661c4157f9d9ed9a4a"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.540240 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp" 
event={"ID":"d334727d-3c8b-4f75-a3ea-a4b537fe480c","Type":"ContainerStarted","Data":"3e73b04a78da160711cd06b8a744e4148b27950f7f7e4786fd1ed3f92c717eaa"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.541190 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"signing-cabundle\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.541905 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj5qv\" (UniqueName: \"kubernetes.io/projected/71f792d7-ae36-482e-87f3-fc30cfa91377-kube-api-access-sj5qv\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.544566 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" event={"ID":"11b1745f-9b78-40f8-bb20-b2c2590e4f46","Type":"ContainerStarted","Data":"8cab3b89ce37b6fcdcebb18337c55edcdf716c09368859a48fab0187ee344084"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.547181 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-64d44f6ddf-6z7rp" event={"ID":"a5ca6cf7-be11-4cd5-952b-b890c7e3b26e","Type":"ContainerStarted","Data":"e9e564e363be8da86283d7263a439e22a23ff56530b423c435cb5e4590cf14c1"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.567280 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" event={"ID":"41f05909-fc3b-4e9f-85e5-4df36ec3b431","Type":"ContainerStarted","Data":"c23ae084096745f51d1403f4d29f002597cf7e9f4099f67e5e5f51c3316bb8b3"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.568999 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-service-ca-bundle\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.570256 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-67c89758df-qh9jw" event={"ID":"fde32e16-bb98-4b8e-9e25-fdaf5b8df655","Type":"ContainerStarted","Data":"288891ecca142e3436a25ac1e47f444236e061041bb77054575ab3380554562d"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.581476 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-747b44746d-kxkvb" event={"ID":"d6483c17-196a-4e41-8950-46d60c5505c9","Type":"ContainerStarted","Data":"b34fac1ea852daac9a9beb2f536c0f45f6338061f0bc624a045fd1bf07edd47d"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.582487 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"mcc-proxy-tls\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.584697 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8" event={"ID":"16a06343-d795-4c00-8684-13e9158ab544","Type":"ContainerStarted","Data":"e4fe91c592fceec43ee8156ce8dea4af2faf2436b4d8f96957f53c3f23632649"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.594614 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" event={"ID":"d245ee6c-4b68-41b6-b516-38a882666394","Type":"ContainerStarted","Data":"c49aca45d0e6e27d209612c675dc46485683c2b3053a6b6fb0e20dd090ffce1e"} 
Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.598020 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq" event={"ID":"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c","Type":"ContainerStarted","Data":"491f76faa04b84df9805ea781e08ffe0eff7af1d398a984930323fe2c420e2d1"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.604569 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" event={"ID":"4669eb3c-24d5-4643-91d1-de96326757fa","Type":"ContainerStarted","Data":"d2518e74c027a7fef75f3a28712197dbc5b4c4f39c226efe3d223cffcef991a2"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.606096 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-console-operator/console-operator-67c89758df-qh9jw" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.606812 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607169 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d66505a1-a5af-4565-b177-ab0ea765730c-serving-cert\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607201 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-proxy-tls\") pod \"machine-config-controller-f9cdd68f7-v8gqb\" (UID: \"256a3e7e-168f-4aaf-81dd-0e4da35fcccc\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607229 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/48229b09-7bab-42bd-83f2-ad2944222630-webhook-certs\") pod \"multus-admission-controller-69db94689b-zr7d6\" (UID: \"48229b09-7bab-42bd-83f2-ad2944222630\") " pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607264 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-service-ca\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607290 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-ca\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607335 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/32a41280-9c1f-40b4-85cf-3d18b87e6d55-config\") pod \"kube-storage-version-migrator-operator-565b79b866-5vp2x\" (UID: \"32a41280-9c1f-40b4-85cf-3d18b87e6d55\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607359 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-cabundle\") pod \"service-ca-74545575db-bv8pp\" (UID: \"f36beec6-1904-41d7-bb02-d83e22db2c5a\") " pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607377 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c511f97-df39-47bb-b5e9-58ed11fc3263-config-volume\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607398 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/503cdef7-1c26-4522-a0d3-09a28bad0340-config\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607431 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-key\") pod \"service-ca-74545575db-bv8pp\" (UID: \"f36beec6-1904-41d7-bb02-d83e22db2c5a\") " pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607449 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0635667-b4d3-43fb-b783-841a7bf96457-config\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607476 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/503cdef7-1c26-4522-a0d3-09a28bad0340-serving-cert\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607588 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607649 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/71f792d7-ae36-482e-87f3-fc30cfa91377-images\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: 
\"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607683 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1c511f97-df39-47bb-b5e9-58ed11fc3263-metrics-tls\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.607702 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a4715539-c6a4-4c6f-9380-73fae6de4fe0-kube-api-access\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.608672 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c511f97-df39-47bb-b5e9-58ed11fc3263-config-volume\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.608816 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/503cdef7-1c26-4522-a0d3-09a28bad0340-config\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.609004 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.108951484 +0000 UTC m=+123.181556971 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.610968 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d0635667-b4d3-43fb-b783-841a7bf96457-config\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.614261 5113 patch_prober.go:28] interesting pod/console-operator-67c89758df-qh9jw container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.614340 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-67c89758df-qh9jw" podUID="fde32e16-bb98-4b8e-9e25-fdaf5b8df655" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.616876 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-service-ca\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.618016 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/d66505a1-a5af-4565-b177-ab0ea765730c-etcd-ca\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.619309 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/71f792d7-ae36-482e-87f3-fc30cfa91377-images\") pod \"machine-config-operator-67c9d58cbb-m9c4q\" (UID: \"71f792d7-ae36-482e-87f3-fc30cfa91377\") " pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.619587 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/503cdef7-1c26-4522-a0d3-09a28bad0340-serving-cert\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.619657 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/48229b09-7bab-42bd-83f2-ad2944222630-webhook-certs\") pod 
\"multus-admission-controller-69db94689b-zr7d6\" (UID: \"48229b09-7bab-42bd-83f2-ad2944222630\") " pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.620431 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-cabundle\") pod \"service-ca-74545575db-bv8pp\" (UID: \"f36beec6-1904-41d7-bb02-d83e22db2c5a\") " pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.620779 5113 projected.go:289] Couldn't get configMap openshift-operator-lifecycle-manager/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.620943 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/256a3e7e-168f-4aaf-81dd-0e4da35fcccc-proxy-tls\") pod \"machine-config-controller-f9cdd68f7-v8gqb\" (UID: \"256a3e7e-168f-4aaf-81dd-0e4da35fcccc\") " pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.621491 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/32a41280-9c1f-40b4-85cf-3d18b87e6d55-config\") pod \"kube-storage-version-migrator-operator-565b79b866-5vp2x\" (UID: \"32a41280-9c1f-40b4-85cf-3d18b87e6d55\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.622808 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1c511f97-df39-47bb-b5e9-58ed11fc3263-metrics-tls\") pod \"dns-default-9wphq\" (UID: \"1c511f97-df39-47bb-b5e9-58ed11fc3263\") " pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.623191 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.623470 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29495520-b9gz9" event={"ID":"28e0ef1a-f823-4898-90a3-66c67c5f19eb","Type":"ContainerStarted","Data":"1ed56093ec350e5ae16ceb54a0bb322604ae288e2ad280a268a7994b7909d637"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.623542 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.627152 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d66505a1-a5af-4565-b177-ab0ea765730c-serving-cert\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.627741 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: 
\"kubernetes.io/secret/f36beec6-1904-41d7-bb02-d83e22db2c5a-signing-key\") pod \"service-ca-74545575db-bv8pp\" (UID: \"f36beec6-1904-41d7-bb02-d83e22db2c5a\") " pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.635651 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" event={"ID":"ffde9a75-3edf-462f-af90-c312c4f05986","Type":"ContainerStarted","Data":"82396be9855059532497cebfeea008ef1c5c92af1e3c7a00558a4c6b91726aaf"} Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.647005 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.652387 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a4715539-c6a4-4c6f-9380-73fae6de4fe0-kube-api-access\") pod \"kube-apiserver-operator-575994946d-pstq8\" (UID: \"a4715539-c6a4-4c6f-9380-73fae6de4fe0\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.682228 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.702732 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-samples-operator\"/\"cluster-samples-operator-dockercfg-jmhxf\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.709604 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.710067 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.210046158 +0000 UTC m=+123.282651535 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.722027 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.741634 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.747278 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.752657 5113 patch_prober.go:28] interesting pod/controller-manager-65b6cccf98-8rbrn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.752754 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" podUID="ffde9a75-3edf-462f-af90-c312c4f05986" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.752995 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4"] Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.761237 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.781738 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.792186 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d0635667-b4d3-43fb-b783-841a7bf96457-kube-api-access\") pod \"kube-controller-manager-operator-69d5f845f8-pnrbz\" (UID: \"d0635667-b4d3-43fb-b783-841a7bf96457\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.801584 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.810747 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:42 crc 
kubenswrapper[5113]: E0130 00:11:42.812230 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.312206505 +0000 UTC m=+123.384811882 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.822621 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-server-dockercfg-dzw6b\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.829369 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-smc9m" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.847993 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.860484 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/503cdef7-1c26-4522-a0d3-09a28bad0340-kube-api-access\") pod \"openshift-kube-scheduler-operator-54f497555d-nm82k\" (UID: \"503cdef7-1c26-4522-a0d3-09a28bad0340\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.860602 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.881104 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"hostpath-provisioner\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.900912 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.913087 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.918275 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.418237443 +0000 UTC m=+123.490842820 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.920545 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"openshift-service-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.923809 5113 projected.go:194] Error preparing data for projected volume kube-api-access-n9ld2 for pod openshift-ingress/router-default-68cf44c8b8-pdqxh: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.923928 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-kube-api-access-n9ld2 podName:8547dc44-d12a-4cf9-a12f-1a1f2dcb3433 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.423900559 +0000 UTC m=+123.496505936 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-n9ld2" (UniqueName: "kubernetes.io/projected/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-kube-api-access-n9ld2") pod "router-default-68cf44c8b8-pdqxh" (UID: "8547dc44-d12a-4cf9-a12f-1a1f2dcb3433") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.941511 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"kube-root-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.962265 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.963271 5113 projected.go:194] Error preparing data for projected volume kube-api-access-2mchh for pod openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: E0130 00:11:42.963444 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/88af8dd9-5994-4276-a701-8dc0af32b4bb-kube-api-access-2mchh podName:88af8dd9-5994-4276-a701-8dc0af32b4bb nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.463380357 +0000 UTC m=+123.535985734 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-2mchh" (UniqueName: "kubernetes.io/projected/88af8dd9-5994-4276-a701-8dc0af32b4bb-kube-api-access-2mchh") pod "openshift-apiserver-operator-846cbfc458-zmdjn" (UID: "88af8dd9-5994-4276-a701-8dc0af32b4bb") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.980706 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"control-plane-machine-set-operator-dockercfg-gnx66\"" Jan 30 00:11:42 crc kubenswrapper[5113]: I0130 00:11:42.985079 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.001694 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.009645 5113 projected.go:194] Error preparing data for projected volume kube-api-access-c9phx for pod openshift-authentication/oauth-openshift-66458b6674-65wnm: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.009837 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/62474d91-1e1c-48ee-b28d-bfa517692c72-kube-api-access-c9phx podName:62474d91-1e1c-48ee-b28d-bfa517692c72 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.509796771 +0000 UTC m=+123.582402158 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-c9phx" (UniqueName: "kubernetes.io/projected/62474d91-1e1c-48ee-b28d-bfa517692c72-kube-api-access-c9phx") pod "oauth-openshift-66458b6674-65wnm" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.019641 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.019912 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.519885234 +0000 UTC m=+123.592490611 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.020970 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.021403 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.521390061 +0000 UTC m=+123.593995438 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.021670 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-canary\"/\"kube-root-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.040761 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"kube-root-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.062112 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.069679 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnvxp\" (UniqueName: \"kubernetes.io/projected/de9d205a-be35-4bef-8883-3c11fddc1c8a-kube-api-access-tnvxp\") pod \"packageserver-7d4fc7d867-v7z7g\" (UID: \"de9d205a-be35-4bef-8883-3c11fddc1c8a\") " pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.069701 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbrpx\" (UniqueName: \"kubernetes.io/projected/fd060364-4a55-4af3-b560-0530d43641d0-kube-api-access-lbrpx\") pod \"catalog-operator-75ff9f647d-x975j\" (UID: \"fd060364-4a55-4af3-b560-0530d43641d0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.071058 5113 projected.go:194] Error preparing data for projected volume kube-api-access-b4c2d for pod openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg: failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.076069 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-sz7cq\" (UniqueName: \"kubernetes.io/projected/cba9a802-5da5-472a-89eb-8ef391936cb8-kube-api-access-sz7cq\") pod \"olm-operator-5cdf44d969-vcjpp\" (UID: \"cba9a802-5da5-472a-89eb-8ef391936cb8\") " pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.076598 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtrw5\" (UniqueName: \"kubernetes.io/projected/61eb8992-aa29-40df-bd7b-c3e937249c67-kube-api-access-mtrw5\") pod \"package-server-manager-77f986bd66-nvsp7\" (UID: \"61eb8992-aa29-40df-bd7b-c3e937249c67\") " pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.078030 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-kube-api-access-b4c2d podName:89390c7a-b6e2-43fb-8b63-4df42bbbd9b5 nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.577996171 +0000 UTC m=+123.650601548 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-b4c2d" (UniqueName: "kubernetes.io/projected/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-kube-api-access-b4c2d") pod "collect-profiles-29495520-qdtrg" (UID: "89390c7a-b6e2-43fb-8b63-4df42bbbd9b5") : failed to sync configmap cache: timed out waiting for the condition Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.102270 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-operator-dockercfg-sw6nc\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.106469 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.121373 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.123614 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.124268 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.624212079 +0000 UTC m=+123.696817466 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.132145 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfhf2\" (UniqueName: \"kubernetes.io/projected/614dc955-75e1-4543-9e87-e3f4835c927d-kube-api-access-gfhf2\") pod \"service-ca-operator-5b9c976747-pgfzj\" (UID: \"614dc955-75e1-4543-9e87-e3f4835c927d\") " pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.141575 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-ac-dockercfg-gj7jx\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.142575 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.164394 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-controller-dockercfg-xnj77\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.173747 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.182188 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.192757 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fjxh\" (UniqueName: \"kubernetes.io/projected/32a41280-9c1f-40b4-85cf-3d18b87e6d55-kube-api-access-5fjxh\") pod \"kube-storage-version-migrator-operator-565b79b866-5vp2x\" (UID: \"32a41280-9c1f-40b4-85cf-3d18b87e6d55\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.201426 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"dns-dockercfg-kpvmz\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.209855 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.222024 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.226262 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.226693 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.726677386 +0000 UTC m=+123.799282763 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.229653 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cj4s\" (UniqueName: \"kubernetes.io/projected/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-kube-api-access-8cj4s\") pod \"marketplace-operator-547dbd544d-qljgk\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.231278 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd"] Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.250426 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"hostpath-provisioner\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: W0130 00:11:43.271788 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17999f27_0d6c_46f2_82b4_a07bec4b1021.slice/crio-cc83c8c15de8635c48834e8b6d60d1a5679211abb04f472f53fe0b980bf3c7b6 WatchSource:0}: Error finding container cc83c8c15de8635c48834e8b6d60d1a5679211abb04f472f53fe0b980bf3c7b6: Status 404 returned error can't find the container with id cc83c8c15de8635c48834e8b6d60d1a5679211abb04f472f53fe0b980bf3c7b6 Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.272014 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.279459 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhd9x\" (UniqueName: \"kubernetes.io/projected/abc0d911-0769-4cbb-8a02-d5ced71ed5b5-kube-api-access-dhd9x\") pod \"csi-hostpathplugin-l5rrt\" (UID: \"abc0d911-0769-4cbb-8a02-d5ced71ed5b5\") " pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.282059 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkwrr\" (UniqueName: \"kubernetes.io/projected/3634960c-b798-480d-b652-3ffca7e9cf70-kube-api-access-vkwrr\") pod \"migrator-866fcbc849-ng6js\" (UID: \"3634960c-b798-480d-b652-3ffca7e9cf70\") " pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.291590 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-dockercfg-bf7fj\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.296475 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.301959 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.313620 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-phnqd\" (UniqueName: \"kubernetes.io/projected/f36beec6-1904-41d7-bb02-d83e22db2c5a-kube-api-access-phnqd\") pod \"service-ca-74545575db-bv8pp\" (UID: \"f36beec6-1904-41d7-bb02-d83e22db2c5a\") " pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.325347 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-dockercfg-tnfx9\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.328482 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.328533 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.328747 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.828653667 +0000 UTC m=+123.901259044 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.329442 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.330042 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.83002503 +0000 UTC m=+123.902630407 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.342226 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-dockercfg-2wbn2\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.345532 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.362919 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-canary\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.380767 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8kgz\" (UniqueName: \"kubernetes.io/projected/845806fc-89db-419b-9f4c-b9263fa93527-kube-api-access-s8kgz\") pod \"ingress-canary-hzktp\" (UID: \"845806fc-89db-419b-9f4c-b9263fa93527\") " pod="openshift-ingress-canary/ingress-canary-hzktp" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.385684 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.401376 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8rps\" (UniqueName: \"kubernetes.io/projected/d66505a1-a5af-4565-b177-ab0ea765730c-kube-api-access-x8rps\") pod \"etcd-operator-69b85846b6-t984r\" (UID: \"d66505a1-a5af-4565-b177-ab0ea765730c\") " pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.422714 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator\"/\"kube-storage-version-migrator-sa-dockercfg-kknhg\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.434197 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.434980 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.435419 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-n9ld2\" (UniqueName: \"kubernetes.io/projected/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-kube-api-access-n9ld2\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.435465 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:43.935438509 +0000 UTC m=+124.008049436 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.445366 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"olm-operator-serviceaccount-dockercfg-4gqzj\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.446696 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.449378 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.452036 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q"] Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.452878 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.456691 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.461873 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-dockercfg-2h6bs\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.464455 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.465994 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9ld2\" (UniqueName: \"kubernetes.io/projected/8547dc44-d12a-4cf9-a12f-1a1f2dcb3433-kube-api-access-n9ld2\") pod \"router-default-68cf44c8b8-pdqxh\" (UID: \"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433\") " pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.481561 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca-operator\"/\"service-ca-operator-dockercfg-bjqfd\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.485402 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.529023 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"marketplace-operator-dockercfg-2cfkp\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.529621 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.541594 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2mchh\" (UniqueName: \"kubernetes.io/projected/88af8dd9-5994-4276-a701-8dc0af32b4bb-kube-api-access-2mchh\") pod \"openshift-apiserver-operator-846cbfc458-zmdjn\" (UID: \"88af8dd9-5994-4276-a701-8dc0af32b4bb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.541646 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-c9phx\" (UniqueName: \"kubernetes.io/projected/62474d91-1e1c-48ee-b28d-bfa517692c72-kube-api-access-c9phx\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.541676 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.546002 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca\"/\"service-ca-dockercfg-bgxvm\"" Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.546771 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.046754521 +0000 UTC m=+124.119359898 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.550284 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-74545575db-bv8pp" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.552461 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9phx\" (UniqueName: \"kubernetes.io/projected/62474d91-1e1c-48ee-b28d-bfa517692c72-kube-api-access-c9phx\") pod \"oauth-openshift-66458b6674-65wnm\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.553042 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mchh\" (UniqueName: \"kubernetes.io/projected/88af8dd9-5994-4276-a701-8dc0af32b4bb-kube-api-access-2mchh\") pod \"openshift-apiserver-operator-846cbfc458-zmdjn\" (UID: \"88af8dd9-5994-4276-a701-8dc0af32b4bb\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.565951 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"hostpath-provisioner\"/\"csi-hostpath-provisioner-sa-dockercfg-7dcws\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.573324 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.579273 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-69db94689b-zr7d6"] Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.581698 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-canary\"/\"default-dockercfg-9pgs7\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.586092 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-hzktp" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.615044 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb"] Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.622748 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-dockercfg-6c46w\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.629590 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.641754 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-dockercfg-kw8fx\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.643479 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.655734 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" event={"ID":"44725449-ac50-4ae0-935a-4d70c1a921f1","Type":"ContainerStarted","Data":"3ee7b4bf2bff569b6eb2c7212e5f8dec4e5e976bcb24746cfa4aab75908fad98"} Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.662628 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-dockercfg-4vdnc\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.663720 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.663873 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.163850742 +0000 UTC m=+124.236456119 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.664314 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.664601 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-b4c2d\" (UniqueName: \"kubernetes.io/projected/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-kube-api-access-b4c2d\") pod \"collect-profiles-29495520-qdtrg\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.665364 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.165347718 +0000 UTC m=+124.237953095 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.670510 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.671489 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4c2d\" (UniqueName: \"kubernetes.io/projected/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-kube-api-access-b4c2d\") pod \"collect-profiles-29495520-qdtrg\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.685322 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" event={"ID":"37c8062b-4496-46f0-9562-9f9d27740557","Type":"ContainerStarted","Data":"0d4f5e40cb3f8e2647f70084153052c3081c360cca93e43b0055646fb421413f"} Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.706755 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" event={"ID":"2554dafb-e152-489f-a585-bfe5638c0b82","Type":"ContainerStarted","Data":"f7dccaa4091a7cd81b7b954689ee80be5d60558d8a49f8f62984bddce60db601"} Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.706939 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.740890 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp" event={"ID":"d334727d-3c8b-4f75-a3ea-a4b537fe480c","Type":"ContainerStarted","Data":"49d3dcbc55ff551f259d425cce22fe5e3527c9e4ce7b60457b3c256246d02a80"} Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.743827 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-64d44f6ddf-6z7rp" podStartSLOduration=101.743808198 podStartE2EDuration="1m41.743808198s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:43.743794688 +0000 UTC m=+123.816400065" watchObservedRunningTime="2026-01-30 00:11:43.743808198 +0000 UTC m=+123.816413595" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.768443 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.768676 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:44.268648172 +0000 UTC m=+124.341253549 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.769400 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.770125 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.270097366 +0000 UTC m=+124.342702743 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.791535 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.791977 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.797600 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k"] Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.801407 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8"] Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.801546 5113 patch_prober.go:28] interesting pod/apiserver-8596bd845d-9swvg container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.9:8443/livez\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.801587 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" podUID="44725449-ac50-4ae0-935a-4d70c1a921f1" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.9:8443/livez\": dial tcp 10.217.0.9:8443: connect: connection refused" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.814135 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-smc9m" 
event={"ID":"b181d297-81e3-4d12-8537-1ee3a42213ce","Type":"ContainerStarted","Data":"ff09336646554e52cc3ba7d06e2c5a51c29de45bbd01188bc5eb26d1762cc787"} Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.823856 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" event={"ID":"d245ee6c-4b68-41b6-b516-38a882666394","Type":"ContainerStarted","Data":"631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3"} Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.824161 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.827268 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" event={"ID":"71f792d7-ae36-482e-87f3-fc30cfa91377","Type":"ContainerStarted","Data":"10f323f2f3427bb11c65c7426bd42e620f8d4927b801009a9b70372fc33682c2"} Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.829368 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd" event={"ID":"17999f27-0d6c-46f2-82b4-a07bec4b1021","Type":"ContainerStarted","Data":"cc83c8c15de8635c48834e8b6d60d1a5679211abb04f472f53fe0b980bf3c7b6"} Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.835144 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq" event={"ID":"70b23cc7-4205-4b33-a0a7-a30ee9d4ff7c","Type":"ContainerStarted","Data":"e2ef5061fbb0a6cd23e8ef32728e3f7d9f32ba3ec4a6b2179a60cd8175d86033"} Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.871191 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.871511 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" event={"ID":"c4af1b2c-aad9-48c2-b1d7-36cd069c556d","Type":"ContainerStarted","Data":"0a9877c9e0af7f89e020c782a26a639aae2c66f5dc370924b7229ad53462c9e1"} Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.877364 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.377340752 +0000 UTC m=+124.449946129 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.878711 5113 patch_prober.go:28] interesting pod/console-operator-67c89758df-qh9jw container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.878767 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-67c89758df-qh9jw" podUID="fde32e16-bb98-4b8e-9e25-fdaf5b8df655" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/readyz\": dial tcp 10.217.0.13:8443: connect: connection refused" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.879409 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"oauth-openshift-dockercfg-d2bf2\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.881462 5113 patch_prober.go:28] interesting pod/controller-manager-65b6cccf98-8rbrn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.881503 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" podUID="ffde9a75-3edf-462f-af90-c312c4f05986" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.881569 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-console/downloads-747b44746d-kxkvb" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.881591 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.881637 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.885261 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-9wphq"] Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.886282 5113 patch_prober.go:28] interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.886330 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.903681 5113 patch_prober.go:28] interesting pod/route-controller-manager-776cdc94d6-99ddj container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.903747 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" podUID="4669eb3c-24d5-4643-91d1-de96326757fa" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.927134 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.952683 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-dockercfg-vfqp6\"" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.955874 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.978823 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:43 crc kubenswrapper[5113]: I0130 00:11:43.982798 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz"] Jan 30 00:11:43 crc kubenswrapper[5113]: E0130 00:11:43.991246 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.491231434 +0000 UTC m=+124.563836811 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.058544 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-7f5c659b84-twdjp" podStartSLOduration=101.058505856 podStartE2EDuration="1m41.058505856s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:44.05767474 +0000 UTC m=+124.130280127" watchObservedRunningTime="2026-01-30 00:11:44.058505856 +0000 UTC m=+124.131111233" Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.071005 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-pruner-29495520-b9gz9" podStartSLOduration=102.070982805 podStartE2EDuration="1m42.070982805s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:44.026112369 +0000 UTC m=+124.098717746" watchObservedRunningTime="2026-01-30 00:11:44.070982805 +0000 UTC m=+124.143588182" Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.086202 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5113]: E0130 00:11:44.086800 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.586778536 +0000 UTC m=+124.659383923 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.103432 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-86c45576b9-trfl4" podStartSLOduration=101.103412753 podStartE2EDuration="1m41.103412753s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:44.10299224 +0000 UTC m=+124.175597637" watchObservedRunningTime="2026-01-30 00:11:44.103412753 +0000 UTC m=+124.176018130" Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.159072 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j"] Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.178354 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj"] Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.187794 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:44 crc kubenswrapper[5113]: E0130 00:11:44.188112 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.688097697 +0000 UTC m=+124.760703074 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5113]: W0130 00:11:44.195313 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod503cdef7_1c26_4522_a0d3_09a28bad0340.slice/crio-126147008ef70130fdeb0fa6cdb1c122daa53a9449b5a1bbda62c17d4387f067 WatchSource:0}: Error finding container 126147008ef70130fdeb0fa6cdb1c122daa53a9449b5a1bbda62c17d4387f067: Status 404 returned error can't find the container with id 126147008ef70130fdeb0fa6cdb1c122daa53a9449b5a1bbda62c17d4387f067 Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.288885 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5113]: E0130 00:11:44.289105 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.789073467 +0000 UTC m=+124.861678844 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.290415 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:44 crc kubenswrapper[5113]: E0130 00:11:44.290909 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.790891853 +0000 UTC m=+124.863497230 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.296246 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-54c688565-x4v7p" podStartSLOduration=102.296224759 podStartE2EDuration="1m42.296224759s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:44.265103101 +0000 UTC m=+124.337708478" watchObservedRunningTime="2026-01-30 00:11:44.296224759 +0000 UTC m=+124.368830136" Jan 30 00:11:44 crc kubenswrapper[5113]: W0130 00:11:44.305887 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod614dc955_75e1_4543_9e87_e3f4835c927d.slice/crio-53331dcee194a1b8d31c1f8d4be88cc622124c9d8335911b3fb137c347b26ee2 WatchSource:0}: Error finding container 53331dcee194a1b8d31c1f8d4be88cc622124c9d8335911b3fb137c347b26ee2: Status 404 returned error can't find the container with id 53331dcee194a1b8d31c1f8d4be88cc622124c9d8335911b3fb137c347b26ee2 Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.392690 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5113]: E0130 00:11:44.393231 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:44.893209585 +0000 UTC m=+124.965814962 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.497182 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:44 crc kubenswrapper[5113]: E0130 00:11:44.497686 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:44.997668114 +0000 UTC m=+125.070273491 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.528901 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-686468bdd5-cddd8" podStartSLOduration=102.528883225 podStartE2EDuration="1m42.528883225s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:44.474862775 +0000 UTC m=+124.547468162" watchObservedRunningTime="2026-01-30 00:11:44.528883225 +0000 UTC m=+124.601488602" Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.578897 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g"] Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.608190 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5113]: E0130 00:11:44.608534 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.108502542 +0000 UTC m=+125.181107919 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.619939 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-747b44746d-kxkvb" podStartSLOduration=102.619915186 podStartE2EDuration="1m42.619915186s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:44.594220688 +0000 UTC m=+124.666826075" watchObservedRunningTime="2026-01-30 00:11:44.619915186 +0000 UTC m=+124.692520563" Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.620492 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x"] Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.684891 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-755bb95488-krhlw" podStartSLOduration=101.684846585 podStartE2EDuration="1m41.684846585s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:44.673334227 +0000 UTC m=+124.745939614" watchObservedRunningTime="2026-01-30 00:11:44.684846585 +0000 UTC m=+124.757451962" Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.702787 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7"] Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.713267 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:44 crc kubenswrapper[5113]: E0130 00:11:44.714001 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.213988922 +0000 UTC m=+125.286594299 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.811287 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-67c89758df-qh9jw" podStartSLOduration=102.811266477 podStartE2EDuration="1m42.811266477s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:44.743248462 +0000 UTC m=+124.815853839" watchObservedRunningTime="2026-01-30 00:11:44.811266477 +0000 UTC m=+124.883871844" Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.816242 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:44 crc kubenswrapper[5113]: E0130 00:11:44.816921 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.316895502 +0000 UTC m=+125.389500879 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.919159 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:44 crc kubenswrapper[5113]: E0130 00:11:44.919560 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.419547795 +0000 UTC m=+125.492153172 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.963350 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" event={"ID":"503cdef7-1c26-4522-a0d3-09a28bad0340","Type":"ContainerStarted","Data":"126147008ef70130fdeb0fa6cdb1c122daa53a9449b5a1bbda62c17d4387f067"} Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.963439 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9wphq" event={"ID":"1c511f97-df39-47bb-b5e9-58ed11fc3263","Type":"ContainerStarted","Data":"77fa843c5e31c82fedd5aeaa484f258b96e8851230ccf5aed9a01571b80bcb59"} Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.963470 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js"] Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.968853 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" event={"ID":"256a3e7e-168f-4aaf-81dd-0e4da35fcccc","Type":"ContainerStarted","Data":"2439061bd255381af1c4eeeb6fc2f8034d38b34f8870d64c62c40583143d3f20"} Jan 30 00:11:44 crc kubenswrapper[5113]: I0130 00:11:44.984377 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" event={"ID":"48229b09-7bab-42bd-83f2-ad2944222630","Type":"ContainerStarted","Data":"31a24d140c22b72718ecadc328eb6db425ac9ebf39304ca534690f5769ccf3ef"} Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:44.999306 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" event={"ID":"32a41280-9c1f-40b4-85cf-3d18b87e6d55","Type":"ContainerStarted","Data":"5ba113dc999aecb05eda55edd7e836d0ef2d2f2088cfa598bb00910e86fd4bf5"} Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.017555 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" event={"ID":"d0635667-b4d3-43fb-b783-841a7bf96457","Type":"ContainerStarted","Data":"b67174ad9acc7858a51b82d0ebf3e28b96eeb8044bde94b6922a140ffba7ba41"} Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.044491 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5113]: E0130 00:11:45.046018 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.545946266 +0000 UTC m=+125.618551643 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.061363 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" event={"ID":"37c8062b-4496-46f0-9562-9f9d27740557","Type":"ContainerStarted","Data":"496ec0f7d1b3040201c01a71869ed8845a6749eee2eccb8c1649e7dd0ad8cd26"} Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.065548 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" event={"ID":"a4715539-c6a4-4c6f-9380-73fae6de4fe0","Type":"ContainerStarted","Data":"acefa028f5052c4fc38e90f2fca87294e52233886a3097f955dbed8b7c235748"} Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.070489 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" event={"ID":"fd060364-4a55-4af3-b560-0530d43641d0","Type":"ContainerStarted","Data":"f9c919530629c4e7b11dd47e145736f7f05425e9536ea2d5692b87c8f0fe4095"} Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.089381 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-smc9m" event={"ID":"b181d297-81e3-4d12-8537-1ee3a42213ce","Type":"ContainerStarted","Data":"b9ed4323ea36aa3435b640a7c0ce275c202a9cef620e8027ca878d1c004392b4"} Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.121149 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd" event={"ID":"17999f27-0d6c-46f2-82b4-a07bec4b1021","Type":"ContainerStarted","Data":"ab01d11228aa85570a3a4734492333d95eaed5140e4c243f7afee261de0745d9"} Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.132128 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" event={"ID":"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433","Type":"ContainerStarted","Data":"502f791c1c93048b954e6750f889cb2d1362538663b0208cef09f95d0a88c1b2"} Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.146816 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:45 crc kubenswrapper[5113]: E0130 00:11:45.147222 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.647208825 +0000 UTC m=+125.719814202 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.166701 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" event={"ID":"c4af1b2c-aad9-48c2-b1d7-36cd069c556d","Type":"ContainerStarted","Data":"e44c1c0ebc71f2ba3040114c571bf17e03bbb77c1445501852ae08ae57ffbf72"} Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.180978 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" event={"ID":"614dc955-75e1-4543-9e87-e3f4835c927d","Type":"ContainerStarted","Data":"53331dcee194a1b8d31c1f8d4be88cc622124c9d8335911b3fb137c347b26ee2"} Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.182048 5113 ???:1] "http: TLS handshake error from 192.168.126.11:57522: no serving certificate available for the kubelet" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.183277 5113 patch_prober.go:28] interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.183345 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.183967 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" podStartSLOduration=103.183953938 podStartE2EDuration="1m43.183953938s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:45.143440777 +0000 UTC m=+125.216046154" watchObservedRunningTime="2026-01-30 00:11:45.183953938 +0000 UTC m=+125.256559315" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.248000 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5113]: E0130 00:11:45.251258 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.751238341 +0000 UTC m=+125.823843718 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.305740 5113 ???:1] "http: TLS handshake error from 192.168.126.11:57538: no serving certificate available for the kubelet" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.344694 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" podStartSLOduration=102.344654706 podStartE2EDuration="1m42.344654706s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:45.338183285 +0000 UTC m=+125.410788662" watchObservedRunningTime="2026-01-30 00:11:45.344654706 +0000 UTC m=+125.417260083" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.364990 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:45 crc kubenswrapper[5113]: E0130 00:11:45.367985 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.867967051 +0000 UTC m=+125.940572428 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.382634 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.405286 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.425490 5113 ???:1] "http: TLS handshake error from 192.168.126.11:57552: no serving certificate available for the kubelet" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.465870 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5113]: E0130 00:11:45.466209 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:45.966190906 +0000 UTC m=+126.038796283 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.565916 5113 ???:1] "http: TLS handshake error from 192.168.126.11:51740: no serving certificate available for the kubelet" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.569064 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:45 crc kubenswrapper[5113]: E0130 00:11:45.569693 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.069673774 +0000 UTC m=+126.142279151 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.596240 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.652641 5113 ???:1] "http: TLS handshake error from 192.168.126.11:51744: no serving certificate available for the kubelet" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.696911 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5113]: E0130 00:11:45.699400 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.199354188 +0000 UTC m=+126.271959565 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.709213 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-75ffdb6fcd-lxlzd" podStartSLOduration=102.709181763 podStartE2EDuration="1m42.709181763s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:45.672197993 +0000 UTC m=+125.744803400" watchObservedRunningTime="2026-01-30 00:11:45.709181763 +0000 UTC m=+125.781787140" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.726496 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:45 crc kubenswrapper[5113]: E0130 00:11:45.727137 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.227116931 +0000 UTC m=+126.299722308 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.799939 5113 ???:1] "http: TLS handshake error from 192.168.126.11:51756: no serving certificate available for the kubelet" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.826377 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" podStartSLOduration=102.826349968 podStartE2EDuration="1m42.826349968s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:45.783484854 +0000 UTC m=+125.856090231" watchObservedRunningTime="2026-01-30 00:11:45.826349968 +0000 UTC m=+125.898955355" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.828643 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:45 crc kubenswrapper[5113]: E0130 00:11:45.829121 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.329096833 +0000 UTC m=+126.401702210 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.844474 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" podStartSLOduration=103.84443835 podStartE2EDuration="1m43.84443835s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:45.826531822 +0000 UTC m=+125.899137189" watchObservedRunningTime="2026-01-30 00:11:45.84443835 +0000 UTC m=+125.917043727" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.891179 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-799b87ffcd-dbqgp" podStartSLOduration=103.891145242 podStartE2EDuration="1m43.891145242s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:45.880256193 +0000 UTC m=+125.952861570" watchObservedRunningTime="2026-01-30 00:11:45.891145242 +0000 UTC m=+125.963750619" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.895460 5113 ???:1] "http: TLS handshake error from 192.168.126.11:51758: no serving certificate available for the kubelet" Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.930785 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:45 crc kubenswrapper[5113]: E0130 00:11:45.931255 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.431237999 +0000 UTC m=+126.503843376 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:45 crc kubenswrapper[5113]: I0130 00:11:45.938337 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" podStartSLOduration=103.93831896 podStartE2EDuration="1m43.93831896s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:45.937925438 +0000 UTC m=+126.010530815" watchObservedRunningTime="2026-01-30 00:11:45.93831896 +0000 UTC m=+126.010924327" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.021934 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-smc9m" podStartSLOduration=8.021906819 podStartE2EDuration="8.021906819s" podCreationTimestamp="2026-01-30 00:11:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:45.985549319 +0000 UTC m=+126.058154696" watchObservedRunningTime="2026-01-30 00:11:46.021906819 +0000 UTC m=+126.094512196" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.023089 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn"] Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.034286 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:46 crc kubenswrapper[5113]: E0130 00:11:46.034869 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.534845351 +0000 UTC m=+126.607450728 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.047355 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" podStartSLOduration=8.04732356 podStartE2EDuration="8.04732356s" podCreationTimestamp="2026-01-30 00:11:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:46.008154791 +0000 UTC m=+126.080760168" watchObservedRunningTime="2026-01-30 00:11:46.04732356 +0000 UTC m=+126.119928927" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.088279 5113 ???:1] "http: TLS handshake error from 192.168.126.11:51764: no serving certificate available for the kubelet" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.117161 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-6b9cb4dbcf-5cnkq" podStartSLOduration=103.117126841 podStartE2EDuration="1m43.117126841s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:46.050602961 +0000 UTC m=+126.123208358" watchObservedRunningTime="2026-01-30 00:11:46.117126841 +0000 UTC m=+126.189732208" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.143466 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:46 crc kubenswrapper[5113]: E0130 00:11:46.144070 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.644052808 +0000 UTC m=+126.716658175 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.197342 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-74545575db-bv8pp"] Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.234555 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg"] Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.244656 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" podStartSLOduration=103.244627546 podStartE2EDuration="1m43.244627546s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:46.166825986 +0000 UTC m=+126.239431383" watchObservedRunningTime="2026-01-30 00:11:46.244627546 +0000 UTC m=+126.317232923" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.245835 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:46 crc kubenswrapper[5113]: E0130 00:11:46.246345 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.746325868 +0000 UTC m=+126.818931245 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.262031 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-69b85846b6-t984r"] Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.262307 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-hzktp"] Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.288902 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" event={"ID":"71f792d7-ae36-482e-87f3-fc30cfa91377","Type":"ContainerStarted","Data":"466c7dc9971cb015251ad95f1ca70f29f5456a18250eced330209dfb80dd4629"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.288960 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" event={"ID":"71f792d7-ae36-482e-87f3-fc30cfa91377","Type":"ContainerStarted","Data":"7c4f60d41f1bd88004d8b74b20dfc221438fb8c95a79713594e651765097d924"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.347388 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" event={"ID":"8547dc44-d12a-4cf9-a12f-1a1f2dcb3433","Type":"ContainerStarted","Data":"ac38492d6bef2f224d6cfabd0aec35489be8ebca6dbd6c74a0a6c6e725960ade"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.349796 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:46 crc kubenswrapper[5113]: E0130 00:11:46.351065 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.851050446 +0000 UTC m=+126.923655823 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.369975 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-6b564684c8-44zv4" event={"ID":"c4af1b2c-aad9-48c2-b1d7-36cd069c556d","Type":"ContainerStarted","Data":"c7a7927aac07b9d2bee4e51bc351c9c3e30321b068cc761ed25df7e0901cef1f"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.400274 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-67c9d58cbb-m9c4q" podStartSLOduration=103.400250827 podStartE2EDuration="1m43.400250827s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:46.388497201 +0000 UTC m=+126.461102578" watchObservedRunningTime="2026-01-30 00:11:46.400250827 +0000 UTC m=+126.472856194" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.401994 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" event={"ID":"614dc955-75e1-4543-9e87-e3f4835c927d","Type":"ContainerStarted","Data":"0df582a9637ede4cb1653b635ce169f8fbbcf0e1604b69941c66edc2bacefead"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.453394 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" event={"ID":"256a3e7e-168f-4aaf-81dd-0e4da35fcccc","Type":"ContainerStarted","Data":"0caf3f111c500f91208decb0f67149c04a1ba930db46e40843e6b012fc6a45db"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.453498 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp"] Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.454314 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:46 crc kubenswrapper[5113]: E0130 00:11:46.454791 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.954756351 +0000 UTC m=+127.027361728 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.470228 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:46 crc kubenswrapper[5113]: E0130 00:11:46.470764 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:46.970744478 +0000 UTC m=+127.043349855 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.489962 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" event={"ID":"48229b09-7bab-42bd-83f2-ad2944222630","Type":"ContainerStarted","Data":"2fead64a8b9cacd349f3c025df5429720339c652877eae83155d67483dc7583e"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.497182 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js" event={"ID":"3634960c-b798-480d-b652-3ffca7e9cf70","Type":"ContainerStarted","Data":"793c2641fc7003754e58443dd7322cc49b3576237dd7723fe498322188db1e99"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.512995 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" event={"ID":"32a41280-9c1f-40b4-85cf-3d18b87e6d55","Type":"ContainerStarted","Data":"e4ebb12d79c17a5bb7ac1546554dea86eefb5c90e196ef9c053e215f76a93b6a"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.513757 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-66458b6674-65wnm"] Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.533229 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" event={"ID":"88af8dd9-5994-4276-a701-8dc0af32b4bb","Type":"ContainerStarted","Data":"405918e69bfcb4e068a5b76c005107c3ed64ffb3f4b87a87571436c606284021"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.540075 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podStartSLOduration=103.540057815 
podStartE2EDuration="1m43.540057815s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:46.535006817 +0000 UTC m=+126.607612194" watchObservedRunningTime="2026-01-30 00:11:46.540057815 +0000 UTC m=+126.612663192" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.571892 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:46 crc kubenswrapper[5113]: E0130 00:11:46.573405 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.0733675 +0000 UTC m=+127.145972877 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.580702 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" event={"ID":"de9d205a-be35-4bef-8883-3c11fddc1c8a","Type":"ContainerStarted","Data":"de60d2ba45b6b0864cf931f8bd8a437469114f114363b05a1458aa3c30a4703d"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.580814 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" event={"ID":"de9d205a-be35-4bef-8883-3c11fddc1c8a","Type":"ContainerStarted","Data":"413bf1a794cc5fef5829d5a4b344ca850a4d6dcb812c0e8bb3c78a62fc08df16"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.581581 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.606669 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-l5rrt"] Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.607176 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-5b9c976747-pgfzj" podStartSLOduration=103.607148601 podStartE2EDuration="1m43.607148601s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:46.594627391 +0000 UTC m=+126.667232778" watchObservedRunningTime="2026-01-30 00:11:46.607148601 +0000 UTC m=+126.679753978" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.614201 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-qljgk"] Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.619000 5113 patch_prober.go:28] 
interesting pod/packageserver-7d4fc7d867-v7z7g container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.19:5443/healthz\": dial tcp 10.217.0.19:5443: connect: connection refused" start-of-body= Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.619107 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" podUID="de9d205a-be35-4bef-8883-3c11fddc1c8a" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.19:5443/healthz\": dial tcp 10.217.0.19:5443: connect: connection refused" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.637705 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-565b79b866-5vp2x" podStartSLOduration=103.637675821 podStartE2EDuration="1m43.637675821s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:46.625913614 +0000 UTC m=+126.698519001" watchObservedRunningTime="2026-01-30 00:11:46.637675821 +0000 UTC m=+126.710281198" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.649713 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.651486 5113 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.651566 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.674073 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:46 crc kubenswrapper[5113]: E0130 00:11:46.676120 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.176104135 +0000 UTC m=+127.248709512 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.676269 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" podStartSLOduration=103.67624176 podStartE2EDuration="1m43.67624176s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:46.666885328 +0000 UTC m=+126.739490705" watchObservedRunningTime="2026-01-30 00:11:46.67624176 +0000 UTC m=+126.748847137" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.676758 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-b5d9m"] Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.681476 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" event={"ID":"61eb8992-aa29-40df-bd7b-c3e937249c67","Type":"ContainerStarted","Data":"fe0da03cd1ffda434c571fcacedf432a5ba4c62ed897840cde168de8ea127270"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.720373 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" event={"ID":"fd060364-4a55-4af3-b560-0530d43641d0","Type":"ContainerStarted","Data":"fa22d268daad96b54676a2b55172c35d1606aca49d4d1a54b7eb6bda0a7535b5"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.720848 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.737589 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-74545575db-bv8pp" event={"ID":"f36beec6-1904-41d7-bb02-d83e22db2c5a","Type":"ContainerStarted","Data":"8fc38f01f7ef259e7d9d11bd6664b2fff56e84cb513e87fedf61332c5ed52639"} Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.751405 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" podStartSLOduration=103.751377436 podStartE2EDuration="1m43.751377436s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:46.75086484 +0000 UTC m=+126.823470217" watchObservedRunningTime="2026-01-30 00:11:46.751377436 +0000 UTC m=+126.823982813" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.751777 5113 patch_prober.go:28] interesting pod/catalog-operator-75ff9f647d-x975j container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused" start-of-body= Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.751836 5113 prober.go:120] "Probe failed" 
probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" podUID="fd060364-4a55-4af3-b560-0530d43641d0" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.778838 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:46 crc kubenswrapper[5113]: E0130 00:11:46.782070 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.28204407 +0000 UTC m=+127.354649447 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.806153 5113 ???:1] "http: TLS handshake error from 192.168.126.11:51766: no serving certificate available for the kubelet" Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.884235 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:46 crc kubenswrapper[5113]: E0130 00:11:46.884605 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.38459211 +0000 UTC m=+127.457197477 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:46 crc kubenswrapper[5113]: I0130 00:11:46.989491 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:46 crc kubenswrapper[5113]: E0130 00:11:46.990082 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.49005359 +0000 UTC m=+127.562658977 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5113]: E0130 00:11:47.092392 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.592375241 +0000 UTC m=+127.664980618 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.091972 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.194415 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5113]: E0130 00:11:47.194942 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.694918371 +0000 UTC m=+127.767523748 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.297363 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:47 crc kubenswrapper[5113]: E0130 00:11:47.297827 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.797809451 +0000 UTC m=+127.870414828 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.399252 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5113]: E0130 00:11:47.399446 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.899408441 +0000 UTC m=+127.972013818 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.400039 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:47 crc kubenswrapper[5113]: E0130 00:11:47.400461 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:47.900452753 +0000 UTC m=+127.973058130 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.501369 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5113]: E0130 00:11:47.501844 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.001820836 +0000 UTC m=+128.074426213 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.604883 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:47 crc kubenswrapper[5113]: E0130 00:11:47.605308 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.105293234 +0000 UTC m=+128.177898611 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.646059 5113 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.646708 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.707091 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5113]: E0130 00:11:47.707501 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.207475482 +0000 UTC m=+128.280080859 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.758534 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-74545575db-bv8pp" event={"ID":"f36beec6-1904-41d7-bb02-d83e22db2c5a","Type":"ContainerStarted","Data":"eac1ebbf85b520d8dac9a91d0c4b8b1dff8428aff0c4f47bfa530ce77959ae1a"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.781862 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" event={"ID":"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5","Type":"ContainerStarted","Data":"5bd09563552936749fdaa44b47680a54ff52b3b9e527d4a40c97097ed69ea44c"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.781927 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" event={"ID":"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5","Type":"ContainerStarted","Data":"36304602d6b825edadb7bd48ad221a0f87857646071998b50fc915fd899f6ef5"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.785200 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js" event={"ID":"3634960c-b798-480d-b652-3ffca7e9cf70","Type":"ContainerStarted","Data":"cc85011457963c250605f9ca4bd6ad9fd5b9c23d73d4043ea05eb4c2d0995063"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.785233 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js" event={"ID":"3634960c-b798-480d-b652-3ffca7e9cf70","Type":"ContainerStarted","Data":"c55631e0ab5a5cb2f9c65f9c35167e4564e1976230dacef6bf2c1ac7a092a48d"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.785382 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-74545575db-bv8pp" podStartSLOduration=104.785359004 podStartE2EDuration="1m44.785359004s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.783245419 +0000 UTC m=+127.855850806" watchObservedRunningTime="2026-01-30 00:11:47.785359004 +0000 UTC m=+127.857964381" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.797720 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-hzktp" event={"ID":"845806fc-89db-419b-9f4c-b9263fa93527","Type":"ContainerStarted","Data":"30cfbe7297d51787520da9cfd042a3741d671319a2c189ea0547c911da45a29f"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.797779 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-hzktp" event={"ID":"845806fc-89db-419b-9f4c-b9263fa93527","Type":"ContainerStarted","Data":"ead45fb93309f151b7a43c312e110627de3f767eb6d0c9bd80ca8a72fc70b4a2"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.808654 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume 
\"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:47 crc kubenswrapper[5113]: E0130 00:11:47.810548 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.310509787 +0000 UTC m=+128.383115164 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.812007 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" podStartSLOduration=105.811991162 podStartE2EDuration="1m45.811991162s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.81128314 +0000 UTC m=+127.883888517" watchObservedRunningTime="2026-01-30 00:11:47.811991162 +0000 UTC m=+127.884596539" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.812710 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" event={"ID":"88af8dd9-5994-4276-a701-8dc0af32b4bb","Type":"ContainerStarted","Data":"0319a7f4ab47883e5f82c2d1672e850ce8d518b54b2072b8e9e3ee12687a2fc5"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.822028 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" event={"ID":"62474d91-1e1c-48ee-b28d-bfa517692c72","Type":"ContainerStarted","Data":"ccfb96aa177309c7b9966ad3df03f30a38ef6cf711e6a696f93d7e1cbaf73b71"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.842886 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" event={"ID":"cba9a802-5da5-472a-89eb-8ef391936cb8","Type":"ContainerStarted","Data":"81da466e20dbcf26fc185ca46eb25b891db73d7cb9aaad84df7326a7c6d6d4e2"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.842946 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" event={"ID":"cba9a802-5da5-472a-89eb-8ef391936cb8","Type":"ContainerStarted","Data":"0863cbc27b00e44809369bb4c04ca8ffde10dbdfae1057392851c886f6c5cbea"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.844258 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.847655 5113 patch_prober.go:28] interesting pod/olm-operator-5cdf44d969-vcjpp container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure 
output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.847715 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" podUID="cba9a802-5da5-472a-89eb-8ef391936cb8" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.853940 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" event={"ID":"a4715539-c6a4-4c6f-9380-73fae6de4fe0","Type":"ContainerStarted","Data":"0e1eb30c073df7241adb094d81e2ac3b253aa0ca625ca3fa2abc841376b92814"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.863942 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" event={"ID":"abc0d911-0769-4cbb-8a02-d5ced71ed5b5","Type":"ContainerStarted","Data":"486d5561210e4cf56ca9977ffaaafc54d46864a2d0cabe38beb9d889d9da5ce9"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.872185 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-866fcbc849-ng6js" podStartSLOduration=104.872165534 podStartE2EDuration="1m44.872165534s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.843333737 +0000 UTC m=+127.915939114" watchObservedRunningTime="2026-01-30 00:11:47.872165534 +0000 UTC m=+127.944770911" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.872397 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-hzktp" podStartSLOduration=9.872391811 podStartE2EDuration="9.872391811s" podCreationTimestamp="2026-01-30 00:11:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.869808221 +0000 UTC m=+127.942413598" watchObservedRunningTime="2026-01-30 00:11:47.872391811 +0000 UTC m=+127.944997188" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.875260 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" event={"ID":"d66505a1-a5af-4565-b177-ab0ea765730c","Type":"ContainerStarted","Data":"70cec0685df828747d1e75c2a88d93bbd7cf00d04c608eb4d562e907bc574d2f"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.875326 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" event={"ID":"d66505a1-a5af-4565-b177-ab0ea765730c","Type":"ContainerStarted","Data":"47b6a0b865279890a68ccfda7f3d8d023f7c7fe7801d476251ff8d936122fe69"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.892719 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9wphq" event={"ID":"1c511f97-df39-47bb-b5e9-58ed11fc3263","Type":"ContainerStarted","Data":"3d8f98129448f838565079c298fedbfbf75e1cab07eeb8d09cc67c1fb0b8d569"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.892791 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-9wphq" 
event={"ID":"1c511f97-df39-47bb-b5e9-58ed11fc3263","Type":"ContainerStarted","Data":"4402b70db7f0d74f0e833992e22bee0ef6531668f9fd94598bdddb3e65dfccad"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.893916 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.906130 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" podStartSLOduration=104.90611217 podStartE2EDuration="1m44.90611217s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.904002284 +0000 UTC m=+127.976607661" watchObservedRunningTime="2026-01-30 00:11:47.90611217 +0000 UTC m=+127.978717547" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.909974 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" event={"ID":"256a3e7e-168f-4aaf-81dd-0e4da35fcccc","Type":"ContainerStarted","Data":"699273d3710bcf8344d6436d48132c0f0691388464b93f4b28f8e62fa534830a"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.910359 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.911746 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" event={"ID":"503cdef7-1c26-4522-a0d3-09a28bad0340","Type":"ContainerStarted","Data":"b6a5b18727ea8d7b2d85e7c4a9011162e1844b4abe7c37d48fbe50e51c1dbc93"} Jan 30 00:11:47 crc kubenswrapper[5113]: E0130 00:11:47.912005 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.411979113 +0000 UTC m=+128.484584490 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.913205 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" event={"ID":"48229b09-7bab-42bd-83f2-ad2944222630","Type":"ContainerStarted","Data":"a828f86da11983b6b469ada4c46b4cc523326908a687c566d889db14bcc6cd44"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.914477 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" event={"ID":"d0635667-b4d3-43fb-b783-841a7bf96457","Type":"ContainerStarted","Data":"8f49b27e1841c9915c28dc7c415b06e12fac25f902fc7039e92a1b348b061cdb"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.925908 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" event={"ID":"61eb8992-aa29-40df-bd7b-c3e937249c67","Type":"ContainerStarted","Data":"8ba89a9b1157c15c62182cea6a9a3a16a1ccbea801ea5f95556ae2f3a3294b7c"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.925972 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" event={"ID":"61eb8992-aa29-40df-bd7b-c3e937249c67","Type":"ContainerStarted","Data":"15f1a6d0709133bcc882d132112e6d230b3fd681deaf3e39bf0a1211e5c3402d"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.926902 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.951464 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" podUID="d245ee6c-4b68-41b6-b516-38a882666394" containerName="kube-multus-additional-cni-plugins" containerID="cri-o://631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" gracePeriod=30 Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.953059 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" event={"ID":"f7e91b03-5282-4f6e-8ed2-a44afa3fc350","Type":"ContainerStarted","Data":"567bebbbe1416f08b7f812e6bca9ea03e75327b2ea279303897af85a298d51bb"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.953094 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" event={"ID":"f7e91b03-5282-4f6e-8ed2-a44afa3fc350","Type":"ContainerStarted","Data":"0a28b1aeae4e9c87f2f10a4a8902ce0f4eecdee7012990904b8c335713f17280"} Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.953109 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.956036 5113 patch_prober.go:28] interesting pod/packageserver-7d4fc7d867-v7z7g container/packageserver 
namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.19:5443/healthz\": dial tcp 10.217.0.19:5443: connect: connection refused" start-of-body= Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.956109 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" podUID="de9d205a-be35-4bef-8883-3c11fddc1c8a" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.19:5443/healthz\": dial tcp 10.217.0.19:5443: connect: connection refused" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.956193 5113 patch_prober.go:28] interesting pod/catalog-operator-75ff9f647d-x975j container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused" start-of-body= Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.956209 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" podUID="fd060364-4a55-4af3-b560-0530d43641d0" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.30:8443/healthz\": dial tcp 10.217.0.30:8443: connect: connection refused" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.967620 5113 patch_prober.go:28] interesting pod/marketplace-operator-547dbd544d-qljgk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.967677 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" podUID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 30 00:11:47 crc kubenswrapper[5113]: I0130 00:11:47.995055 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-846cbfc458-zmdjn" podStartSLOduration=104.995034925 podStartE2EDuration="1m44.995034925s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.9530688 +0000 UTC m=+128.025674197" watchObservedRunningTime="2026-01-30 00:11:47.995034925 +0000 UTC m=+128.067640292" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.014420 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:48 crc kubenswrapper[5113]: E0130 00:11:48.015131 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.51511383 +0000 UTC m=+128.587719197 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.033447 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-575994946d-pstq8" podStartSLOduration=105.033421169 podStartE2EDuration="1m45.033421169s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:47.996112768 +0000 UTC m=+128.068718145" watchObservedRunningTime="2026-01-30 00:11:48.033421169 +0000 UTC m=+128.106026546" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.065777 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-69b85846b6-t984r" podStartSLOduration=106.065753955 podStartE2EDuration="1m46.065753955s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:48.03569792 +0000 UTC m=+128.108303297" watchObservedRunningTime="2026-01-30 00:11:48.065753955 +0000 UTC m=+128.138359332" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.099141 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-9wphq" podStartSLOduration=10.099120213 podStartE2EDuration="10.099120213s" podCreationTimestamp="2026-01-30 00:11:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:48.097092949 +0000 UTC m=+128.169698326" watchObservedRunningTime="2026-01-30 00:11:48.099120213 +0000 UTC m=+128.171725590" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.100574 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-69d5f845f8-pnrbz" podStartSLOduration=105.100560187 podStartE2EDuration="1m45.100560187s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:48.064554587 +0000 UTC m=+128.137159964" watchObservedRunningTime="2026-01-30 00:11:48.100560187 +0000 UTC m=+128.173165554" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.119297 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5113]: E0130 00:11:48.119699 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:48.619671951 +0000 UTC m=+128.692277318 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.121056 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7" podStartSLOduration=105.121040084 podStartE2EDuration="1m45.121040084s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:48.120999163 +0000 UTC m=+128.193604540" watchObservedRunningTime="2026-01-30 00:11:48.121040084 +0000 UTC m=+128.193645461" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.133112 5113 ???:1] "http: TLS handshake error from 192.168.126.11:51778: no serving certificate available for the kubelet" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.222612 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:48 crc kubenswrapper[5113]: E0130 00:11:48.223092 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.723075867 +0000 UTC m=+128.795681244 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.224938 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-f9cdd68f7-v8gqb" podStartSLOduration=105.224921275 podStartE2EDuration="1m45.224921275s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:48.159920513 +0000 UTC m=+128.232525890" watchObservedRunningTime="2026-01-30 00:11:48.224921275 +0000 UTC m=+128.297526652" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.227594 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" podStartSLOduration=105.227566357 podStartE2EDuration="1m45.227566357s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:48.19002833 +0000 UTC m=+128.262633717" watchObservedRunningTime="2026-01-30 00:11:48.227566357 +0000 UTC m=+128.300171734" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.237392 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-69db94689b-zr7d6" podStartSLOduration=105.237088983 podStartE2EDuration="1m45.237088983s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:48.225200324 +0000 UTC m=+128.297805701" watchObservedRunningTime="2026-01-30 00:11:48.237088983 +0000 UTC m=+128.309694360" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.265676 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-54f497555d-nm82k" podStartSLOduration=105.265658092 podStartE2EDuration="1m45.265658092s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:48.264907728 +0000 UTC m=+128.337513105" watchObservedRunningTime="2026-01-30 00:11:48.265658092 +0000 UTC m=+128.338263469" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.325277 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5113]: E0130 00:11:48.325951 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:48.825927547 +0000 UTC m=+128.898532934 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.340987 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-5777786469-kphvh" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.427394 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:48 crc kubenswrapper[5113]: E0130 00:11:48.428118 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:48.928060593 +0000 UTC m=+129.000665970 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.529121 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5113]: E0130 00:11:48.529239 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.029214439 +0000 UTC m=+129.101819816 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.529569 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:48 crc kubenswrapper[5113]: E0130 00:11:48.529954 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.029945922 +0000 UTC m=+129.102551299 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.631040 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5113]: E0130 00:11:48.631615 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.131594512 +0000 UTC m=+129.204199889 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.651529 5113 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:11:48 crc kubenswrapper[5113]: [-]has-synced failed: reason withheld Jan 30 00:11:48 crc kubenswrapper[5113]: [+]process-running ok Jan 30 00:11:48 crc kubenswrapper[5113]: healthz check failed Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.651642 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.733052 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:48 crc kubenswrapper[5113]: E0130 00:11:48.733554 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.233538203 +0000 UTC m=+129.306143580 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.733919 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.736885 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.804451 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.810194 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-8596bd845d-9swvg" Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.835037 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:48 crc kubenswrapper[5113]: E0130 00:11:48.835288 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.335242037 +0000 UTC m=+129.407847414 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.939287 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:48 crc kubenswrapper[5113]: E0130 00:11:48.941004 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.440977245 +0000 UTC m=+129.513582822 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.961222 5113 patch_prober.go:28] interesting pod/apiserver-9ddfb9f55-s5qkt container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 30 00:11:48 crc kubenswrapper[5113]: [+]log ok Jan 30 00:11:48 crc kubenswrapper[5113]: [+]etcd ok Jan 30 00:11:48 crc kubenswrapper[5113]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 30 00:11:48 crc kubenswrapper[5113]: [+]poststarthook/generic-apiserver-start-informers ok Jan 30 00:11:48 crc kubenswrapper[5113]: [+]poststarthook/max-in-flight-filter ok Jan 30 00:11:48 crc kubenswrapper[5113]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 30 00:11:48 crc kubenswrapper[5113]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 30 00:11:48 crc kubenswrapper[5113]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 30 00:11:48 crc kubenswrapper[5113]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Jan 30 00:11:48 crc kubenswrapper[5113]: [+]poststarthook/project.openshift.io-projectcache ok Jan 30 00:11:48 crc kubenswrapper[5113]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 30 00:11:48 crc kubenswrapper[5113]: [-]poststarthook/openshift.io-startinformers failed: reason withheld Jan 30 00:11:48 crc kubenswrapper[5113]: [+]poststarthook/openshift.io-restmapperupdater ok Jan 30 00:11:48 crc kubenswrapper[5113]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 30 00:11:48 crc kubenswrapper[5113]: livez check failed Jan 30 00:11:48 crc kubenswrapper[5113]: I0130 00:11:48.961323 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" podUID="37c8062b-4496-46f0-9562-9f9d27740557" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.011810 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" event={"ID":"62474d91-1e1c-48ee-b28d-bfa517692c72","Type":"ContainerStarted","Data":"44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7"} Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.019962 5113 patch_prober.go:28] interesting pod/olm-operator-5cdf44d969-vcjpp container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.020023 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" podUID="cba9a802-5da5-472a-89eb-8ef391936cb8" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection 
refused" Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.024131 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.024317 5113 patch_prober.go:28] interesting pod/marketplace-operator-547dbd544d-qljgk container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" start-of-body= Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.024382 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" podUID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.41:8080/healthz\": dial tcp 10.217.0.41:8080: connect: connection refused" Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.042429 5113 patch_prober.go:28] interesting pod/oauth-openshift-66458b6674-65wnm container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.16:6443/healthz\": dial tcp 10.217.0.16:6443: connect: connection refused" start-of-body= Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.042502 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.16:6443/healthz\": dial tcp 10.217.0.16:6443: connect: connection refused" Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.043131 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.043883 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.543857314 +0000 UTC m=+129.616462691 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.044065 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-75ff9f647d-x975j" Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.145225 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.151360 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.651338167 +0000 UTC m=+129.723943544 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.170358 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" podStartSLOduration=106.170340598 podStartE2EDuration="1m46.170340598s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:49.16588923 +0000 UTC m=+129.238494607" watchObservedRunningTime="2026-01-30 00:11:49.170340598 +0000 UTC m=+129.242945975" Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.247086 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.247393 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.747342793 +0000 UTC m=+129.819948180 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.248090 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.248562 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.74855287 +0000 UTC m=+129.821158247 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.308249 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-7d4fc7d867-v7z7g" Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.349893 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.350088 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.850059798 +0000 UTC m=+129.922665165 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.350206 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.350719 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.850693818 +0000 UTC m=+129.923299205 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.451927 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.452179 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.952146062 +0000 UTC m=+130.024751439 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.452968 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.453329 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:49.953322049 +0000 UTC m=+130.025927426 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.555056 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.555254 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.055222059 +0000 UTC m=+130.127827436 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.555856 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.556326 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.056305172 +0000 UTC m=+130.128910729 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.646848 5113 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:11:49 crc kubenswrapper[5113]: [-]has-synced failed: reason withheld Jan 30 00:11:49 crc kubenswrapper[5113]: [+]process-running ok Jan 30 00:11:49 crc kubenswrapper[5113]: healthz check failed Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.646946 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.657955 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.658353 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.158331746 +0000 UTC m=+130.230937123 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.759604 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.760391 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.260362048 +0000 UTC m=+130.332967585 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.850364 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.850492 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.860833 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.861047 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.361004928 +0000 UTC m=+130.433610305 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.861826 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.862277 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.362255418 +0000 UTC m=+130.434860795 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.863663 5113 patch_prober.go:28] interesting pod/console-64d44f6ddf-6z7rp container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body=
Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.863726 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-console/console-64d44f6ddf-6z7rp" podUID="a5ca6cf7-be11-4cd5-952b-b890c7e3b26e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.12:8443/health\": dial tcp 10.217.0.12:8443: connect: connection refused"
Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.903690 5113 patch_prober.go:28] interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.903812 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.963863 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.463833997 +0000 UTC m=+130.536439374 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.963724 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:49 crc kubenswrapper[5113]: I0130 00:11:49.965827 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:49 crc kubenswrapper[5113]: E0130 00:11:49.966256 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.466232051 +0000 UTC m=+130.538837428 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.035877 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" event={"ID":"abc0d911-0769-4cbb-8a02-d5ced71ed5b5","Type":"ContainerStarted","Data":"99d4131a9aeb00a2d0ba4a854f5dd18a52d174908a68fa42e31ef1ddde2b9864"}
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.070600 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.071183 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.571156144 +0000 UTC m=+130.643761521 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.115191 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-znr9f"]
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.143411 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-znr9f"]
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.143621 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-znr9f"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.149433 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"certified-operators-dockercfg-7cl8d\""
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.191382 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.192654 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.692636463 +0000 UTC m=+130.765241840 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.292788 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.293074 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.793026805 +0000 UTC m=+130.865632182 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.293741 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-utilities\") pod \"certified-operators-znr9f\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " pod="openshift-marketplace/certified-operators-znr9f"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.293816 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-catalog-content\") pod \"certified-operators-znr9f\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " pod="openshift-marketplace/certified-operators-znr9f"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.294091 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hxgh\" (UniqueName: \"kubernetes.io/projected/2d28df1d-4619-45dc-8fee-b482cfad0ead-kube-api-access-7hxgh\") pod \"certified-operators-znr9f\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " pod="openshift-marketplace/certified-operators-znr9f"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.294318 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.294987 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.794969215 +0000 UTC m=+130.867574592 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.313160 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vjvch"]
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.322752 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vjvch"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.330384 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"community-operators-dockercfg-vrd5f\""
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.339119 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vjvch"]
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.395284 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.395556 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.895496401 +0000 UTC m=+130.968101778 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.395737 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-utilities\") pod \"certified-operators-znr9f\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " pod="openshift-marketplace/certified-operators-znr9f"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.395894 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-catalog-content\") pod \"certified-operators-znr9f\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " pod="openshift-marketplace/certified-operators-znr9f"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.396100 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-7hxgh\" (UniqueName: \"kubernetes.io/projected/2d28df1d-4619-45dc-8fee-b482cfad0ead-kube-api-access-7hxgh\") pod \"certified-operators-znr9f\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " pod="openshift-marketplace/certified-operators-znr9f"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.396220 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.396288 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-utilities\") pod \"certified-operators-znr9f\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " pod="openshift-marketplace/certified-operators-znr9f"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.396428 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-catalog-content\") pod \"certified-operators-znr9f\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " pod="openshift-marketplace/certified-operators-znr9f"
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.396764 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.896753311 +0000 UTC m=+130.969358898 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.416412 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.424884 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hxgh\" (UniqueName: \"kubernetes.io/projected/2d28df1d-4619-45dc-8fee-b482cfad0ead-kube-api-access-7hxgh\") pod \"certified-operators-znr9f\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " pod="openshift-marketplace/certified-operators-znr9f"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.463246 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-znr9f"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.498032 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.498374 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-catalog-content\") pod \"community-operators-vjvch\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " pod="openshift-marketplace/community-operators-vjvch"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.498423 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-utilities\") pod \"community-operators-vjvch\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " pod="openshift-marketplace/community-operators-vjvch"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.498442 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxcq8\" (UniqueName: \"kubernetes.io/projected/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-kube-api-access-sxcq8\") pod \"community-operators-vjvch\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " pod="openshift-marketplace/community-operators-vjvch"
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.498751 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:50.998730063 +0000 UTC m=+131.071335440 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.509169 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hr22j"]
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.536872 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hr22j"]
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.537072 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hr22j"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.601290 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-catalog-content\") pod \"community-operators-vjvch\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " pod="openshift-marketplace/community-operators-vjvch"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.601359 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-utilities\") pod \"community-operators-vjvch\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " pod="openshift-marketplace/community-operators-vjvch"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.601385 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-sxcq8\" (UniqueName: \"kubernetes.io/projected/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-kube-api-access-sxcq8\") pod \"community-operators-vjvch\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " pod="openshift-marketplace/community-operators-vjvch"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.601463 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.601797 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.101784938 +0000 UTC m=+131.174390315 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.602310 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-catalog-content\") pod \"community-operators-vjvch\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " pod="openshift-marketplace/community-operators-vjvch"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.602610 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-utilities\") pod \"community-operators-vjvch\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " pod="openshift-marketplace/community-operators-vjvch"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.630077 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxcq8\" (UniqueName: \"kubernetes.io/projected/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-kube-api-access-sxcq8\") pod \"community-operators-vjvch\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " pod="openshift-marketplace/community-operators-vjvch"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.642886 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vjvch"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.647170 5113 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 00:11:50 crc kubenswrapper[5113]: [-]has-synced failed: reason withheld
Jan 30 00:11:50 crc kubenswrapper[5113]: [+]process-running ok
Jan 30 00:11:50 crc kubenswrapper[5113]: healthz check failed
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.647213 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.702914 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.703165 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.20314492 +0000 UTC m=+131.275750297 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.703336 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-catalog-content\") pod \"certified-operators-hr22j\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " pod="openshift-marketplace/certified-operators-hr22j"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.703400 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-utilities\") pod \"certified-operators-hr22j\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " pod="openshift-marketplace/certified-operators-hr22j"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.703514 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mth44\" (UniqueName: \"kubernetes.io/projected/6b49ee2f-63ad-4580-b29c-8ebcade15f14-kube-api-access-mth44\") pod \"certified-operators-hr22j\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " pod="openshift-marketplace/certified-operators-hr22j"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.703568 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.703905 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.203878682 +0000 UTC m=+131.276484059 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.706056 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rbzxk"]
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.721910 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rbzxk"]
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.722187 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rbzxk"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.733612 5113 ???:1] "http: TLS handshake error from 192.168.126.11:51792: no serving certificate available for the kubelet"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.804277 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.804673 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-catalog-content\") pod \"certified-operators-hr22j\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " pod="openshift-marketplace/certified-operators-hr22j"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.804740 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-utilities\") pod \"certified-operators-hr22j\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " pod="openshift-marketplace/certified-operators-hr22j"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.804849 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-mth44\" (UniqueName: \"kubernetes.io/projected/6b49ee2f-63ad-4580-b29c-8ebcade15f14-kube-api-access-mth44\") pod \"certified-operators-hr22j\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " pod="openshift-marketplace/certified-operators-hr22j"
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.805016 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.304966966 +0000 UTC m=+131.377572433 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.805666 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-catalog-content\") pod \"certified-operators-hr22j\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " pod="openshift-marketplace/certified-operators-hr22j"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.810838 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-utilities\") pod \"certified-operators-hr22j\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " pod="openshift-marketplace/certified-operators-hr22j"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.831882 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-mth44\" (UniqueName: \"kubernetes.io/projected/6b49ee2f-63ad-4580-b29c-8ebcade15f14-kube-api-access-mth44\") pod \"certified-operators-hr22j\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " pod="openshift-marketplace/certified-operators-hr22j"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.885325 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hr22j"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.909079 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.909381 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-catalog-content\") pod \"community-operators-rbzxk\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " pod="openshift-marketplace/community-operators-rbzxk"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.909539 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-utilities\") pod \"community-operators-rbzxk\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " pod="openshift-marketplace/community-operators-rbzxk"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.909690 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrqf2\" (UniqueName: \"kubernetes.io/projected/ed2eb5b7-1d01-4029-86f8-47a057a0352e-kube-api-access-qrqf2\") pod \"community-operators-rbzxk\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " pod="openshift-marketplace/community-operators-rbzxk"
Jan 30 00:11:50 crc kubenswrapper[5113]: E0130 00:11:50.909823 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.409791087 +0000 UTC m=+131.482396464 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.914193 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/revision-pruner-6-crc"]
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.950781 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/revision-pruner-6-crc"]
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.951020 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.957191 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler\"/\"installer-sa-dockercfg-qpkss\""
Jan 30 00:11:50 crc kubenswrapper[5113]: I0130 00:11:50.984818 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler\"/\"kube-root-ca.crt\""
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.019824 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.020147 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-catalog-content\") pod \"community-operators-rbzxk\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " pod="openshift-marketplace/community-operators-rbzxk"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.020225 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-utilities\") pod \"community-operators-rbzxk\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " pod="openshift-marketplace/community-operators-rbzxk"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.020282 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-qrqf2\" (UniqueName: \"kubernetes.io/projected/ed2eb5b7-1d01-4029-86f8-47a057a0352e-kube-api-access-qrqf2\") pod \"community-operators-rbzxk\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " pod="openshift-marketplace/community-operators-rbzxk"
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.020888 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.520842371 +0000 UTC m=+131.593447748 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.022006 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-catalog-content\") pod \"community-operators-rbzxk\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " pod="openshift-marketplace/community-operators-rbzxk"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.022273 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-utilities\") pod \"community-operators-rbzxk\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " pod="openshift-marketplace/community-operators-rbzxk"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.084815 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrqf2\" (UniqueName: \"kubernetes.io/projected/ed2eb5b7-1d01-4029-86f8-47a057a0352e-kube-api-access-qrqf2\") pod \"community-operators-rbzxk\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " pod="openshift-marketplace/community-operators-rbzxk"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.140432 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.141030 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aab16c0a-2713-42b2-b14a-c5da3434319a-kubelet-dir\") pod \"revision-pruner-6-crc\" (UID: \"aab16c0a-2713-42b2-b14a-c5da3434319a\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.141106 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aab16c0a-2713-42b2-b14a-c5da3434319a-kube-api-access\") pod \"revision-pruner-6-crc\" (UID: \"aab16c0a-2713-42b2-b14a-c5da3434319a\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.145618 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.64559271 +0000 UTC m=+131.718198247 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.244911 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.245140 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aab16c0a-2713-42b2-b14a-c5da3434319a-kubelet-dir\") pod \"revision-pruner-6-crc\" (UID: \"aab16c0a-2713-42b2-b14a-c5da3434319a\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.245187 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aab16c0a-2713-42b2-b14a-c5da3434319a-kube-api-access\") pod \"revision-pruner-6-crc\" (UID: \"aab16c0a-2713-42b2-b14a-c5da3434319a\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.245774 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.745748506 +0000 UTC m=+131.818353883 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.245826 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aab16c0a-2713-42b2-b14a-c5da3434319a-kubelet-dir\") pod \"revision-pruner-6-crc\" (UID: \"aab16c0a-2713-42b2-b14a-c5da3434319a\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.268063 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aab16c0a-2713-42b2-b14a-c5da3434319a-kube-api-access\") pod \"revision-pruner-6-crc\" (UID: \"aab16c0a-2713-42b2-b14a-c5da3434319a\") " pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.307321 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vjvch"]
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.346915 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rbzxk"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.348311 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.348845 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.848827341 +0000 UTC m=+131.921432718 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.443407 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-6-crc"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.450581 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.450770 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.95073017 +0000 UTC m=+132.023335547 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.451450 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.451946 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:51.951934598 +0000 UTC m=+132.024539975 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.507702 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-znr9f"]
Jan 30 00:11:51 crc kubenswrapper[5113]: W0130 00:11:51.516012 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d28df1d_4619_45dc_8fee_b482cfad0ead.slice/crio-40be437a7dc5c9f625e23929fb2ebfb7caf3abaa47c733873e62abe6e549f488 WatchSource:0}: Error finding container 40be437a7dc5c9f625e23929fb2ebfb7caf3abaa47c733873e62abe6e549f488: Status 404 returned error can't find the container with id 40be437a7dc5c9f625e23929fb2ebfb7caf3abaa47c733873e62abe6e549f488
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.552809 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.553032 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.052983771 +0000 UTC m=+132.125589138 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.553659 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.554268 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.054259961 +0000 UTC m=+132.126865338 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.651592 5113 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 00:11:51 crc kubenswrapper[5113]: [-]has-synced failed: reason withheld
Jan 30 00:11:51 crc kubenswrapper[5113]: [+]process-running ok
Jan 30 00:11:51 crc kubenswrapper[5113]: healthz check failed
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.651679 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.656657 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.657066 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.157048357 +0000 UTC m=+132.229653734 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.759310 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.760283 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.260262527 +0000 UTC m=+132.332867914 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.810677 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hr22j"]
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.860931 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.862423 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.362400603 +0000 UTC m=+132.435005990 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.866332 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler/revision-pruner-6-crc"]
Jan 30 00:11:51 crc kubenswrapper[5113]: W0130 00:11:51.885949 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b49ee2f_63ad_4580_b29c_8ebcade15f14.slice/crio-37cdf9f19179ad2b1894eb3048fd565c84c337985c1f4a9c460ac96d125c3515 WatchSource:0}: Error finding container 37cdf9f19179ad2b1894eb3048fd565c84c337985c1f4a9c460ac96d125c3515: Status 404 returned error can't find the container with id 37cdf9f19179ad2b1894eb3048fd565c84c337985c1f4a9c460ac96d125c3515
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.964443 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:51 crc kubenswrapper[5113]: E0130 00:11:51.964936 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.464918922 +0000 UTC m=+132.537524299 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:51 crc kubenswrapper[5113]: I0130 00:11:51.977286 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rbzxk"]
Jan 30 00:11:52 crc kubenswrapper[5113]: W0130 00:11:52.007443 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded2eb5b7_1d01_4029_86f8_47a057a0352e.slice/crio-8f908eac179a11bd10cc8b711796baaa741868e2fd3d2a6cca58c0a3e4a459f9 WatchSource:0}: Error finding container 8f908eac179a11bd10cc8b711796baaa741868e2fd3d2a6cca58c0a3e4a459f9: Status 404 returned error can't find the container with id 8f908eac179a11bd10cc8b711796baaa741868e2fd3d2a6cca58c0a3e4a459f9
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.066415 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.066615 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.566579784 +0000 UTC m=+132.639185161 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.067466 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.067943 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.567935296 +0000 UTC m=+132.640540673 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.084666 5113 generic.go:358] "Generic (PLEG): container finished" podID="2d28df1d-4619-45dc-8fee-b482cfad0ead" containerID="4166c8f4a2a8e784d449476671b8081adec9edad3e6efa3d3c03ec0e7552e4cc" exitCode=0
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.084799 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-znr9f" event={"ID":"2d28df1d-4619-45dc-8fee-b482cfad0ead","Type":"ContainerDied","Data":"4166c8f4a2a8e784d449476671b8081adec9edad3e6efa3d3c03ec0e7552e4cc"}
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.084835 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-znr9f" event={"ID":"2d28df1d-4619-45dc-8fee-b482cfad0ead","Type":"ContainerStarted","Data":"40be437a7dc5c9f625e23929fb2ebfb7caf3abaa47c733873e62abe6e549f488"}
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.106739 5113 generic.go:358] "Generic (PLEG): container finished" podID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" containerID="46fee9fbba0f7282fcae39a5f0c40f093eec48b1d3c8a960ad85f4a6ccf589ab" exitCode=0
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.106847 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vjvch" event={"ID":"5d9cf73a-1c55-49b9-9664-393fd1ea11ec","Type":"ContainerDied","Data":"46fee9fbba0f7282fcae39a5f0c40f093eec48b1d3c8a960ad85f4a6ccf589ab"}
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.106883 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vjvch" event={"ID":"5d9cf73a-1c55-49b9-9664-393fd1ea11ec","Type":"ContainerStarted","Data":"eb6d91bfd39147575a59dc2b1f4aa4669d2efc7fccb782668ce066e65af9c905"}
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.139567 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbzxk" event={"ID":"ed2eb5b7-1d01-4029-86f8-47a057a0352e","Type":"ContainerStarted","Data":"8f908eac179a11bd10cc8b711796baaa741868e2fd3d2a6cca58c0a3e4a459f9"}
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.143986 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-6-crc" event={"ID":"aab16c0a-2713-42b2-b14a-c5da3434319a","Type":"ContainerStarted","Data":"b9397763883e1c507c6d9176db8afe363c179d6b5b2a8e7b7f0d9c086fb01206"}
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.148757 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hr22j" event={"ID":"6b49ee2f-63ad-4580-b29c-8ebcade15f14","Type":"ContainerStarted","Data":"37cdf9f19179ad2b1894eb3048fd565c84c337985c1f4a9c460ac96d125c3515"}
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.151617 5113 generic.go:358] "Generic (PLEG): container finished" podID="89390c7a-b6e2-43fb-8b63-4df42bbbd9b5" containerID="5bd09563552936749fdaa44b47680a54ff52b3b9e527d4a40c97097ed69ea44c" exitCode=0
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.151688 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" event={"ID":"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5","Type":"ContainerDied","Data":"5bd09563552936749fdaa44b47680a54ff52b3b9e527d4a40c97097ed69ea44c"}
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.169454 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.169859 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.669810005 +0000 UTC m=+132.742415382 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.170955 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47"
Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.174069 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.674036546 +0000 UTC m=+132.746641923 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.272126 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") "
Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.272441 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed.
No retries permitted until 2026-01-30 00:11:52.772407775 +0000 UTC m=+132.845013152 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.273986 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.274881 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.774867302 +0000 UTC m=+132.847472679 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.308635 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gv2cj"] Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.315912 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gv2cj"] Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.316085 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.321555 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-marketplace-dockercfg-gg4w7\"" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.376687 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.376870 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-utilities\") pod \"redhat-marketplace-gv2cj\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.376949 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.876903206 +0000 UTC m=+132.949508583 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.377274 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fwsm\" (UniqueName: \"kubernetes.io/projected/a527979f-66df-45e5-a643-2c2ebc9fc7c6-kube-api-access-6fwsm\") pod \"redhat-marketplace-gv2cj\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.377499 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-catalog-content\") pod \"redhat-marketplace-gv2cj\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.377673 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.378150 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:52.878143544 +0000 UTC m=+132.950748921 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.478393 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.478670 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.978607948 +0000 UTC m=+133.051213325 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.479276 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-6fwsm\" (UniqueName: \"kubernetes.io/projected/a527979f-66df-45e5-a643-2c2ebc9fc7c6-kube-api-access-6fwsm\") pod \"redhat-marketplace-gv2cj\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.479425 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-catalog-content\") pod \"redhat-marketplace-gv2cj\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.479638 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.479735 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-utilities\") pod \"redhat-marketplace-gv2cj\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.479925 5113 operation_generator.go:615] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-catalog-content\") pod \"redhat-marketplace-gv2cj\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.479999 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:52.979978241 +0000 UTC m=+133.052583688 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.480130 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-utilities\") pod \"redhat-marketplace-gv2cj\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.510975 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fwsm\" (UniqueName: \"kubernetes.io/projected/a527979f-66df-45e5-a643-2c2ebc9fc7c6-kube-api-access-6fwsm\") pod \"redhat-marketplace-gv2cj\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.580835 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.581089 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.081071995 +0000 UTC m=+133.153677362 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.581183 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.581270 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.581336 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.581398 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.581899 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.582210 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.08219852 +0000 UTC m=+133.154804007 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.583917 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-nginx-conf\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.586553 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwt8b\" (UniqueName: \"kubernetes.io/projected/17b87002-b798-480a-8e17-83053d698239-kube-api-access-gwt8b\") pod \"network-check-target-fhkjl\" (UID: \"17b87002-b798-480a-8e17-83053d698239\") " pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.587494 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/6a9ae5f6-97bd-46ac-bafa-ca1b4452a141-networking-console-plugin-cert\") pod \"networking-console-plugin-5ff7774fd9-nljh6\" (UID: \"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141\") " pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.589329 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7w75\" (UniqueName: \"kubernetes.io/projected/f863fff9-286a-45fa-b8f0-8a86994b8440-kube-api-access-l7w75\") pod \"network-check-source-5bb8f5cd97-xdvz5\" (UID: \"f863fff9-286a-45fa-b8f0-8a86994b8440\") " pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.602947 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.616221 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.626487 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.647235 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.651223 5113 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:11:52 crc kubenswrapper[5113]: [-]has-synced failed: reason withheld Jan 30 00:11:52 crc kubenswrapper[5113]: [+]process-running ok Jan 30 00:11:52 crc kubenswrapper[5113]: healthz check failed Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.651298 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.683960 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.684116 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.684846 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.184820722 +0000 UTC m=+133.257426099 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.690634 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d655d34c-2969-43f2-8e93-455507c7cfda-metrics-certs\") pod \"network-metrics-daemon-qx4gj\" (UID: \"d655d34c-2969-43f2-8e93-455507c7cfda\") " pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.701609 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qx4gj" Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.710658 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lpzhr"] Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.785951 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.786393 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.2863781 +0000 UTC m=+133.358983477 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.887959 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.888143 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.388112084 +0000 UTC m=+133.460717461 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:52 crc kubenswrapper[5113]: I0130 00:11:52.888535 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:52 crc kubenswrapper[5113]: E0130 00:11:52.889148 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:53.389120596 +0000 UTC m=+133.461725973 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:52.993787 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:52.994033 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.493988727 +0000 UTC m=+133.566594104 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:52.998585 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:52.999181 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.499160208 +0000 UTC m=+133.571765585 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.016857 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.058568 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpzhr"] Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.100984 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.101402 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-catalog-content\") pod \"redhat-marketplace-lpzhr\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.101462 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-utilities\") pod \"redhat-marketplace-lpzhr\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.101564 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5v8w2\" (UniqueName: \"kubernetes.io/projected/9f80d9f8-15a6-42da-bfff-15ae5c525dca-kube-api-access-5v8w2\") pod \"redhat-marketplace-lpzhr\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.101841 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.601807621 +0000 UTC m=+133.674412998 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.201743 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" event={"ID":"abc0d911-0769-4cbb-8a02-d5ced71ed5b5","Type":"ContainerStarted","Data":"4859f957bd180d615fd3ba6682531134839d461d8b7d0be8bdaabc1c2f2588c6"} Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.202563 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-5v8w2\" (UniqueName: \"kubernetes.io/projected/9f80d9f8-15a6-42da-bfff-15ae5c525dca-kube-api-access-5v8w2\") pod \"redhat-marketplace-lpzhr\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.202608 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-catalog-content\") pod \"redhat-marketplace-lpzhr\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.202649 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-utilities\") pod \"redhat-marketplace-lpzhr\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.202687 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.202991 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.702978057 +0000 UTC m=+133.775583434 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.203488 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-utilities\") pod \"redhat-marketplace-lpzhr\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.204127 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-catalog-content\") pod \"redhat-marketplace-lpzhr\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.241128 5113 generic.go:358] "Generic (PLEG): container finished" podID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" containerID="18755a1375e01f5277b223cc953ca9964c813a0ceaa26f01a78f8005ac9b0694" exitCode=0 Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.242272 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbzxk" event={"ID":"ed2eb5b7-1d01-4029-86f8-47a057a0352e","Type":"ContainerDied","Data":"18755a1375e01f5277b223cc953ca9964c813a0ceaa26f01a78f8005ac9b0694"} Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.292965 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-5v8w2\" (UniqueName: \"kubernetes.io/projected/9f80d9f8-15a6-42da-bfff-15ae5c525dca-kube-api-access-5v8w2\") pod \"redhat-marketplace-lpzhr\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.295671 5113 generic.go:358] "Generic (PLEG): container finished" podID="aab16c0a-2713-42b2-b14a-c5da3434319a" containerID="f6aefc2c01b694d364dc453b2ebd993dc534f1cf383284e9cc9777813a7cb9ea" exitCode=0 Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.296218 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-6-crc" event={"ID":"aab16c0a-2713-42b2-b14a-c5da3434319a","Type":"ContainerDied","Data":"f6aefc2c01b694d364dc453b2ebd993dc534f1cf383284e9cc9777813a7cb9ea"} Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.318130 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.318370 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. 
No retries permitted until 2026-01-30 00:11:53.818346025 +0000 UTC m=+133.890951642 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.318931 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.319966 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.819940585 +0000 UTC m=+133.892545962 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.320896 5113 generic.go:358] "Generic (PLEG): container finished" podID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" containerID="0e50c4da6ebbebbbc6a616e4a68a340536c2e42212315425689417cd71361071" exitCode=0 Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.321826 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hr22j" event={"ID":"6b49ee2f-63ad-4580-b29c-8ebcade15f14","Type":"ContainerDied","Data":"0e50c4da6ebbebbbc6a616e4a68a340536c2e42212315425689417cd71361071"} Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.362667 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.404240 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tvfsp"] Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.421120 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.421613 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.422142 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:53.922112822 +0000 UTC m=+133.994718199 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.424940 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tvfsp"] Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.434185 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-operators-dockercfg-9gxlh\"" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.524079 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.524641 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-utilities\") pod \"redhat-operators-tvfsp\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.524711 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-catalog-content\") pod \"redhat-operators-tvfsp\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.524753 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tpq7\" (UniqueName: \"kubernetes.io/projected/84d95933-fc27-469e-91be-cb781299ccd2-kube-api-access-4tpq7\") pod \"redhat-operators-tvfsp\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.524980 5113 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.024962191 +0000 UTC m=+134.097567568 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.629324 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.629856 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-4tpq7\" (UniqueName: \"kubernetes.io/projected/84d95933-fc27-469e-91be-cb781299ccd2-kube-api-access-4tpq7\") pod \"redhat-operators-tvfsp\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.630089 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-utilities\") pod \"redhat-operators-tvfsp\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.630269 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-catalog-content\") pod \"redhat-operators-tvfsp\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.630802 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.130774312 +0000 UTC m=+134.203379689 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.631390 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-catalog-content\") pod \"redhat-operators-tvfsp\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.631598 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-utilities\") pod \"redhat-operators-tvfsp\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.645061 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.648155 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gv2cj"] Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.658300 5113 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:11:53 crc kubenswrapper[5113]: [-]has-synced failed: reason withheld Jan 30 00:11:53 crc kubenswrapper[5113]: [+]process-running ok Jan 30 00:11:53 crc kubenswrapper[5113]: healthz check failed Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.658403 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.660871 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tpq7\" (UniqueName: \"kubernetes.io/projected/84d95933-fc27-469e-91be-cb781299ccd2-kube-api-access-4tpq7\") pod \"redhat-operators-tvfsp\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.676179 5113 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.717833 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dgdzx"] Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.732778 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: 
\"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.733233 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName: nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.233212077 +0000 UTC m=+134.305817454 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "image-registry-66587d64c8-bnj47" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.773550 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.773656 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dgdzx"] Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.774389 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.787393 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-9ddfb9f55-s5qkt" Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.836185 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.858166 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.860290 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.860753 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-utilities\") pod \"redhat-operators-dgdzx\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.860822 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-catalog-content\") pod \"redhat-operators-dgdzx\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.861012 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7blrm\" (UniqueName: \"kubernetes.io/projected/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-kube-api-access-7blrm\") pod \"redhat-operators-dgdzx\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.861433 5113 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2 podName:9e9b5059-1b3e-4067-a63d-2952cbe863af nodeName:}" failed. No retries permitted until 2026-01-30 00:11:54.361413614 +0000 UTC m=+134.434018991 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.879086 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.886834 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-67c89758df-qh9jw" Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.909204 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:11:53 crc kubenswrapper[5113]: E0130 00:11:53.909320 5113 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" podUID="d245ee6c-4b68-41b6-b516-38a882666394" containerName="kube-multus-additional-cni-plugins" probeResult="unknown" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.914273 5113 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-30T00:11:53.676210145Z","UUID":"c59d1201-5a0b-4f6b-ae1b-2ff05c3ecb62","Handler":null,"Name":"","Endpoint":""} Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.922168 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-qx4gj"] Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.928870 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.953222 5113 csi_plugin.go:106] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.953270 5113 csi_plugin.go:119] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.973318 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:53 
crc kubenswrapper[5113]: I0130 00:11:53.973471 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-7blrm\" (UniqueName: \"kubernetes.io/projected/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-kube-api-access-7blrm\") pod \"redhat-operators-dgdzx\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.973651 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-utilities\") pod \"redhat-operators-dgdzx\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.973690 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-catalog-content\") pod \"redhat-operators-dgdzx\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.975071 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-utilities\") pod \"redhat-operators-dgdzx\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.975339 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-catalog-content\") pod \"redhat-operators-dgdzx\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.980939 5113 csi_attacher.go:373] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 30 00:11:53 crc kubenswrapper[5113]: I0130 00:11:53.981022 5113 operation_generator.go:557] "MountVolume.MountDevice succeeded for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b1264ac67579ad07e7e9003054d44fe40dd55285a4b2f7dc74e48be1aee0868a/globalmount\"" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.084268 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-7blrm\" (UniqueName: \"kubernetes.io/projected/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-kube-api-access-7blrm\") pod \"redhat-operators-dgdzx\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.156968 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.183418 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-66587d64c8-bnj47\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.217324 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.310130 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-config-volume" (OuterVolumeSpecName: "config-volume") pod "89390c7a-b6e2-43fb-8b63-4df42bbbd9b5" (UID: "89390c7a-b6e2-43fb-8b63-4df42bbbd9b5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.308425 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-config-volume\") pod \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.310425 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-secret-volume\") pod \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.310531 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"9e9b5059-1b3e-4067-a63d-2952cbe863af\" (UID: \"9e9b5059-1b3e-4067-a63d-2952cbe863af\") " Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.310615 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4c2d\" (UniqueName: \"kubernetes.io/projected/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-kube-api-access-b4c2d\") pod \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\" (UID: \"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5\") " Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.315512 5113 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.326983 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "89390c7a-b6e2-43fb-8b63-4df42bbbd9b5" (UID: "89390c7a-b6e2-43fb-8b63-4df42bbbd9b5"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.367500 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"registry-dockercfg-6w67b\"" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.371729 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.382984 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-kube-api-access-b4c2d" (OuterVolumeSpecName: "kube-api-access-b4c2d") pod "89390c7a-b6e2-43fb-8b63-4df42bbbd9b5" (UID: "89390c7a-b6e2-43fb-8b63-4df42bbbd9b5"). InnerVolumeSpecName "kube-api-access-b4c2d". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.394301 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpzhr"] Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.398963 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (OuterVolumeSpecName: "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2") pod "9e9b5059-1b3e-4067-a63d-2952cbe863af" (UID: "9e9b5059-1b3e-4067-a63d-2952cbe863af"). InnerVolumeSpecName "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2". PluginName "kubernetes.io/csi", VolumeGIDValue "" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.408285 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gv2cj" event={"ID":"a527979f-66df-45e5-a643-2c2ebc9fc7c6","Type":"ContainerStarted","Data":"27f1936deae4b5d9a448791023c46364d6b866267f52a1eb79b78fccdd7c65ac"} Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.413139 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" event={"ID":"abc0d911-0769-4cbb-8a02-d5ced71ed5b5","Type":"ContainerStarted","Data":"45b59cc8e17b7663b89e7f34835e1a30f4fc19ed66543dc03202918c6a8eb9c4"} Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.422814 5113 reconciler_common.go:299] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.422852 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-b4c2d\" (UniqueName: \"kubernetes.io/projected/89390c7a-b6e2-43fb-8b63-4df42bbbd9b5-kube-api-access-b4c2d\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.422966 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" event={"ID":"f863fff9-286a-45fa-b8f0-8a86994b8440","Type":"ContainerStarted","Data":"98985d84688af02123596bd74fae8bf5188bf1796e3285aa05ba95b94de02e71"} Jan 30 00:11:54 crc kubenswrapper[5113]: W0130 00:11:54.429786 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f80d9f8_15a6_42da_bfff_15ae5c525dca.slice/crio-ed47f1883fe5c990bc2001395f6d398d1b7fd926eb1cb57a387ba9a9389cc3a7 WatchSource:0}: Error finding container ed47f1883fe5c990bc2001395f6d398d1b7fd926eb1cb57a387ba9a9389cc3a7: Status 404 
returned error can't find the container with id ed47f1883fe5c990bc2001395f6d398d1b7fd926eb1cb57a387ba9a9389cc3a7 Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.444581 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qx4gj" event={"ID":"d655d34c-2969-43f2-8e93-455507c7cfda","Type":"ContainerStarted","Data":"44e2fe878997cef43549ce1e82fdf02fa54677464d7080c188a55129097b2a53"} Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.460602 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" event={"ID":"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141","Type":"ContainerStarted","Data":"d2a47f1df068ec7e4da10768e98f7e3aac93e1b6af8eecd9ff65471ab7e5a60f"} Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.463063 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" event={"ID":"89390c7a-b6e2-43fb-8b63-4df42bbbd9b5","Type":"ContainerDied","Data":"36304602d6b825edadb7bd48ad221a0f87857646071998b50fc915fd899f6ef5"} Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.463095 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36304602d6b825edadb7bd48ad221a0f87857646071998b50fc915fd899f6ef5" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.463246 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495520-qdtrg" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.473129 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" event={"ID":"17b87002-b798-480a-8e17-83053d698239","Type":"ContainerStarted","Data":"e326f3bd98f0fda4c24883b2e722a2e6e8968f843b0770a0df1854642de61455"} Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.473196 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-fhkjl" event={"ID":"17b87002-b798-480a-8e17-83053d698239","Type":"ContainerStarted","Data":"363cb33c16555fecad668d351f8c1fd2c679ce0cce18001eee0f1d3cdb0ff68c"} Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.479731 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-network-diagnostics/network-check-target-fhkjl" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.656498 5113 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:11:54 crc kubenswrapper[5113]: [-]has-synced failed: reason withheld Jan 30 00:11:54 crc kubenswrapper[5113]: [+]process-running ok Jan 30 00:11:54 crc kubenswrapper[5113]: healthz check failed Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.656718 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.745496 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tvfsp"] Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.848574 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes 
dir" podUID="9e9b5059-1b3e-4067-a63d-2952cbe863af" path="/var/lib/kubelet/pods/9e9b5059-1b3e-4067-a63d-2952cbe863af/volumes" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.850351 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-11-crc"] Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.851083 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="89390c7a-b6e2-43fb-8b63-4df42bbbd9b5" containerName="collect-profiles" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.851100 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="89390c7a-b6e2-43fb-8b63-4df42bbbd9b5" containerName="collect-profiles" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.851233 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="89390c7a-b6e2-43fb-8b63-4df42bbbd9b5" containerName="collect-profiles" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.935873 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-11-crc"] Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.936155 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.943658 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver\"/\"kube-root-ca.crt\"" Jan 30 00:11:54 crc kubenswrapper[5113]: I0130 00:11:54.943884 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver\"/\"installer-sa-dockercfg-bqqnb\"" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.044666 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/08630e06-ed50-4792-97fb-e8d759867e2d-kubelet-dir\") pod \"revision-pruner-11-crc\" (UID: \"08630e06-ed50-4792-97fb-e8d759867e2d\") " pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.044763 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/08630e06-ed50-4792-97fb-e8d759867e2d-kube-api-access\") pod \"revision-pruner-11-crc\" (UID: \"08630e06-ed50-4792-97fb-e8d759867e2d\") " pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.080441 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-bnj47"] Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.102088 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dgdzx"] Jan 30 00:11:55 crc kubenswrapper[5113]: W0130 00:11:55.145349 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde29c822_8061_4f04_9a8a_b36f6ab0082e.slice/crio-714adcdce307ec70da1452a29a6a4efa4b772a48837822463ffb51f5fd1e91b2 WatchSource:0}: Error finding container 714adcdce307ec70da1452a29a6a4efa4b772a48837822463ffb51f5fd1e91b2: Status 404 returned error can't find the container with id 714adcdce307ec70da1452a29a6a4efa4b772a48837822463ffb51f5fd1e91b2 Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.145625 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kubelet-dir\" 
(UniqueName: \"kubernetes.io/host-path/08630e06-ed50-4792-97fb-e8d759867e2d-kubelet-dir\") pod \"revision-pruner-11-crc\" (UID: \"08630e06-ed50-4792-97fb-e8d759867e2d\") " pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.145686 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/08630e06-ed50-4792-97fb-e8d759867e2d-kube-api-access\") pod \"revision-pruner-11-crc\" (UID: \"08630e06-ed50-4792-97fb-e8d759867e2d\") " pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.145998 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/08630e06-ed50-4792-97fb-e8d759867e2d-kubelet-dir\") pod \"revision-pruner-11-crc\" (UID: \"08630e06-ed50-4792-97fb-e8d759867e2d\") " pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.172244 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-6-crc" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.179474 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/08630e06-ed50-4792-97fb-e8d759867e2d-kube-api-access\") pod \"revision-pruner-11-crc\" (UID: \"08630e06-ed50-4792-97fb-e8d759867e2d\") " pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.189778 5113 patch_prober.go:28] interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.189848 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.246730 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aab16c0a-2713-42b2-b14a-c5da3434319a-kubelet-dir\") pod \"aab16c0a-2713-42b2-b14a-c5da3434319a\" (UID: \"aab16c0a-2713-42b2-b14a-c5da3434319a\") " Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.246789 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aab16c0a-2713-42b2-b14a-c5da3434319a-kube-api-access\") pod \"aab16c0a-2713-42b2-b14a-c5da3434319a\" (UID: \"aab16c0a-2713-42b2-b14a-c5da3434319a\") " Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.246841 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aab16c0a-2713-42b2-b14a-c5da3434319a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "aab16c0a-2713-42b2-b14a-c5da3434319a" (UID: "aab16c0a-2713-42b2-b14a-c5da3434319a"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.247214 5113 reconciler_common.go:299] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/aab16c0a-2713-42b2-b14a-c5da3434319a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.260742 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aab16c0a-2713-42b2-b14a-c5da3434319a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "aab16c0a-2713-42b2-b14a-c5da3434319a" (UID: "aab16c0a-2713-42b2-b14a-c5da3434319a"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.276605 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.350006 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/aab16c0a-2713-42b2-b14a-c5da3434319a-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.553926 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-11-crc"] Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.555455 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/revision-pruner-6-crc" event={"ID":"aab16c0a-2713-42b2-b14a-c5da3434319a","Type":"ContainerDied","Data":"b9397763883e1c507c6d9176db8afe363c179d6b5b2a8e7b7f0d9c086fb01206"} Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.555479 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/revision-pruner-6-crc" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.555500 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9397763883e1c507c6d9176db8afe363c179d6b5b2a8e7b7f0d9c086fb01206" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.563480 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qx4gj" event={"ID":"d655d34c-2969-43f2-8e93-455507c7cfda","Type":"ContainerStarted","Data":"f4c24b6438393a5d4e02286001208e225b1cbd1845c6b47b5733dd73877d6b08"} Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.572645 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-5ff7774fd9-nljh6" event={"ID":"6a9ae5f6-97bd-46ac-bafa-ca1b4452a141","Type":"ContainerStarted","Data":"b3b846fae5ba4ecb017a4d402554ac2953d9de9e2e37b8e79d8a9a37bb542624"} Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.595203 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgdzx" event={"ID":"b8b3ed2e-5058-4d4f-ba3a-e1a083531407","Type":"ContainerStarted","Data":"fa82106cd5f8efa5c8b15274fff3622e11e66133eeca5491c442394e38438d4d"} Jan 30 00:11:55 crc kubenswrapper[5113]: W0130 00:11:55.611030 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod08630e06_ed50_4792_97fb_e8d759867e2d.slice/crio-0988e464ce524ddd16afe36c38b5b5a09dd232a54fdaba58d56fc7601a88b150 WatchSource:0}: Error finding container 0988e464ce524ddd16afe36c38b5b5a09dd232a54fdaba58d56fc7601a88b150: Status 404 returned error can't find the container with id 0988e464ce524ddd16afe36c38b5b5a09dd232a54fdaba58d56fc7601a88b150 Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.625786 5113 generic.go:358] "Generic (PLEG): container finished" podID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerID="cd771ad4b861dd103dc6c7ad20fcd624f9c31b3e84c8549d33b5aa9617ac37f2" exitCode=0 Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.625930 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpzhr" event={"ID":"9f80d9f8-15a6-42da-bfff-15ae5c525dca","Type":"ContainerDied","Data":"cd771ad4b861dd103dc6c7ad20fcd624f9c31b3e84c8549d33b5aa9617ac37f2"} Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.625965 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpzhr" event={"ID":"9f80d9f8-15a6-42da-bfff-15ae5c525dca","Type":"ContainerStarted","Data":"ed47f1883fe5c990bc2001395f6d398d1b7fd926eb1cb57a387ba9a9389cc3a7"} Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.630711 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" event={"ID":"de29c822-8061-4f04-9a8a-b36f6ab0082e","Type":"ContainerStarted","Data":"714adcdce307ec70da1452a29a6a4efa4b772a48837822463ffb51f5fd1e91b2"} Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.632561 5113 generic.go:358] "Generic (PLEG): container finished" podID="84d95933-fc27-469e-91be-cb781299ccd2" containerID="c655e0adbdb502a98eee43fda17401c76f449dfd25e615dd826a6c96e4b83084" exitCode=0 Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.632669 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvfsp" 
event={"ID":"84d95933-fc27-469e-91be-cb781299ccd2","Type":"ContainerDied","Data":"c655e0adbdb502a98eee43fda17401c76f449dfd25e615dd826a6c96e4b83084"} Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.632700 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvfsp" event={"ID":"84d95933-fc27-469e-91be-cb781299ccd2","Type":"ContainerStarted","Data":"d7e6383266e4008416803bcf65d982846013bf77401749f629fba69de42ec7b1"} Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.666947 5113 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:11:55 crc kubenswrapper[5113]: [-]has-synced failed: reason withheld Jan 30 00:11:55 crc kubenswrapper[5113]: [+]process-running ok Jan 30 00:11:55 crc kubenswrapper[5113]: healthz check failed Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.667454 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.688536 5113 generic.go:358] "Generic (PLEG): container finished" podID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerID="354f34ce988303e83d4b70d0ac14bc66aae6dd5ab0d5aa23df3fb842c6c55cd0" exitCode=0 Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.688869 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gv2cj" event={"ID":"a527979f-66df-45e5-a643-2c2ebc9fc7c6","Type":"ContainerDied","Data":"354f34ce988303e83d4b70d0ac14bc66aae6dd5ab0d5aa23df3fb842c6c55cd0"} Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.717884 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" event={"ID":"abc0d911-0769-4cbb-8a02-d5ced71ed5b5","Type":"ContainerStarted","Data":"b6b5a8e74a8b301e613cfbcb0c35d87dc7531fc59e3defb1ee7cee475fd512dd"} Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.724598 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-5bb8f5cd97-xdvz5" event={"ID":"f863fff9-286a-45fa-b8f0-8a86994b8440","Type":"ContainerStarted","Data":"a05bc1ca837cde127ebc8607e09e4078a76ba70e87602be12c28f78033734fae"} Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.741304 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-l5rrt" podStartSLOduration=17.741282361 podStartE2EDuration="17.741282361s" podCreationTimestamp="2026-01-30 00:11:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:55.739270298 +0000 UTC m=+135.811875675" watchObservedRunningTime="2026-01-30 00:11:55.741282361 +0000 UTC m=+135.813887738" Jan 30 00:11:55 crc kubenswrapper[5113]: I0130 00:11:55.936484 5113 ???:1] "http: TLS handshake error from 192.168.126.11:42728: no serving certificate available for the kubelet" Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.045141 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-9wphq" Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.648356 5113 
patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:11:56 crc kubenswrapper[5113]: [-]has-synced failed: reason withheld Jan 30 00:11:56 crc kubenswrapper[5113]: [+]process-running ok Jan 30 00:11:56 crc kubenswrapper[5113]: healthz check failed Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.648807 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.735757 5113 generic.go:358] "Generic (PLEG): container finished" podID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerID="61986be761658ca62966f6c06e4e6d9d0a0999ea6c0e87ded2f78581da0f943d" exitCode=0 Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.735810 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgdzx" event={"ID":"b8b3ed2e-5058-4d4f-ba3a-e1a083531407","Type":"ContainerDied","Data":"61986be761658ca62966f6c06e4e6d9d0a0999ea6c0e87ded2f78581da0f943d"} Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.740626 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" event={"ID":"de29c822-8061-4f04-9a8a-b36f6ab0082e","Type":"ContainerStarted","Data":"fc8a644997413f585441a09ac509df0c34e78409a4372ce967ef4a6cbe8cd030"} Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.740878 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.745110 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-11-crc" event={"ID":"08630e06-ed50-4792-97fb-e8d759867e2d","Type":"ContainerStarted","Data":"1c7659b31bfa4959d6ea4cd520b27d12dfd1d2a02a25473755abc6145504e88b"} Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.745159 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-11-crc" event={"ID":"08630e06-ed50-4792-97fb-e8d759867e2d","Type":"ContainerStarted","Data":"0988e464ce524ddd16afe36c38b5b5a09dd232a54fdaba58d56fc7601a88b150"} Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.763761 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qx4gj" event={"ID":"d655d34c-2969-43f2-8e93-455507c7cfda","Type":"ContainerStarted","Data":"41c91f5ab2d5b3a3f0c668fb1450afd080059979a3b352a0c3eac9b81b672d29"} Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.790875 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" podStartSLOduration=113.790860473 podStartE2EDuration="1m53.790860473s" podCreationTimestamp="2026-01-30 00:10:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:56.787685004 +0000 UTC m=+136.860290381" watchObservedRunningTime="2026-01-30 00:11:56.790860473 +0000 UTC m=+136.863465850" Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.819542 5113 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-kube-apiserver/revision-pruner-11-crc" podStartSLOduration=2.819507174 podStartE2EDuration="2.819507174s" podCreationTimestamp="2026-01-30 00:11:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:56.808026717 +0000 UTC m=+136.880632094" watchObservedRunningTime="2026-01-30 00:11:56.819507174 +0000 UTC m=+136.892112551" Jan 30 00:11:56 crc kubenswrapper[5113]: I0130 00:11:56.859394 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-qx4gj" podStartSLOduration=114.859371234 podStartE2EDuration="1m54.859371234s" podCreationTimestamp="2026-01-30 00:10:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:11:56.844511922 +0000 UTC m=+136.917117299" watchObservedRunningTime="2026-01-30 00:11:56.859371234 +0000 UTC m=+136.931976601" Jan 30 00:11:57 crc kubenswrapper[5113]: I0130 00:11:57.647855 5113 patch_prober.go:28] interesting pod/router-default-68cf44c8b8-pdqxh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 00:11:57 crc kubenswrapper[5113]: [+]has-synced ok Jan 30 00:11:57 crc kubenswrapper[5113]: [+]process-running ok Jan 30 00:11:57 crc kubenswrapper[5113]: healthz check failed Jan 30 00:11:57 crc kubenswrapper[5113]: I0130 00:11:57.648235 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" podUID="8547dc44-d12a-4cf9-a12f-1a1f2dcb3433" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 00:11:57 crc kubenswrapper[5113]: I0130 00:11:57.787881 5113 generic.go:358] "Generic (PLEG): container finished" podID="08630e06-ed50-4792-97fb-e8d759867e2d" containerID="1c7659b31bfa4959d6ea4cd520b27d12dfd1d2a02a25473755abc6145504e88b" exitCode=0 Jan 30 00:11:57 crc kubenswrapper[5113]: I0130 00:11:57.788877 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-11-crc" event={"ID":"08630e06-ed50-4792-97fb-e8d759867e2d","Type":"ContainerDied","Data":"1c7659b31bfa4959d6ea4cd520b27d12dfd1d2a02a25473755abc6145504e88b"} Jan 30 00:11:58 crc kubenswrapper[5113]: I0130 00:11:58.647772 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:58 crc kubenswrapper[5113]: I0130 00:11:58.650578 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-68cf44c8b8-pdqxh" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.021099 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-5cdf44d969-vcjpp" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.030606 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.241711 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.292888 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/08630e06-ed50-4792-97fb-e8d759867e2d-kubelet-dir\") pod \"08630e06-ed50-4792-97fb-e8d759867e2d\" (UID: \"08630e06-ed50-4792-97fb-e8d759867e2d\") " Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.293655 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/08630e06-ed50-4792-97fb-e8d759867e2d-kube-api-access\") pod \"08630e06-ed50-4792-97fb-e8d759867e2d\" (UID: \"08630e06-ed50-4792-97fb-e8d759867e2d\") " Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.295505 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/08630e06-ed50-4792-97fb-e8d759867e2d-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "08630e06-ed50-4792-97fb-e8d759867e2d" (UID: "08630e06-ed50-4792-97fb-e8d759867e2d"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.312768 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08630e06-ed50-4792-97fb-e8d759867e2d-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "08630e06-ed50-4792-97fb-e8d759867e2d" (UID: "08630e06-ed50-4792-97fb-e8d759867e2d"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.395385 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/08630e06-ed50-4792-97fb-e8d759867e2d-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.395487 5113 reconciler_common.go:299] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/08630e06-ed50-4792-97fb-e8d759867e2d-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.810475 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-11-crc" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.810534 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-11-crc" event={"ID":"08630e06-ed50-4792-97fb-e8d759867e2d","Type":"ContainerDied","Data":"0988e464ce524ddd16afe36c38b5b5a09dd232a54fdaba58d56fc7601a88b150"} Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.811234 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0988e464ce524ddd16afe36c38b5b5a09dd232a54fdaba58d56fc7601a88b150" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.855745 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.860935 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-64d44f6ddf-6z7rp" Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.907696 5113 patch_prober.go:28] interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 30 00:11:59 crc kubenswrapper[5113]: I0130 00:11:59.907792 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 30 00:12:01 crc kubenswrapper[5113]: I0130 00:12:01.366770 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:12:03 crc kubenswrapper[5113]: E0130 00:12:03.827210 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:03 crc kubenswrapper[5113]: E0130 00:12:03.830013 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:03 crc kubenswrapper[5113]: E0130 00:12:03.831627 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:03 crc kubenswrapper[5113]: E0130 00:12:03.831748 5113 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" podUID="d245ee6c-4b68-41b6-b516-38a882666394" containerName="kube-multus-additional-cni-plugins" probeResult="unknown" Jan 30 00:12:05 crc kubenswrapper[5113]: I0130 00:12:05.182936 5113 patch_prober.go:28] 
interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 30 00:12:05 crc kubenswrapper[5113]: I0130 00:12:05.183043 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 30 00:12:06 crc kubenswrapper[5113]: I0130 00:12:06.214021 5113 ???:1] "http: TLS handshake error from 192.168.126.11:52484: no serving certificate available for the kubelet" Jan 30 00:12:06 crc kubenswrapper[5113]: I0130 00:12:06.790334 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"] Jan 30 00:12:06 crc kubenswrapper[5113]: I0130 00:12:06.790725 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" podUID="ffde9a75-3edf-462f-af90-c312c4f05986" containerName="controller-manager" containerID="cri-o://82396be9855059532497cebfeea008ef1c5c92af1e3c7a00558a4c6b91726aaf" gracePeriod=30 Jan 30 00:12:06 crc kubenswrapper[5113]: I0130 00:12:06.809552 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"] Jan 30 00:12:06 crc kubenswrapper[5113]: I0130 00:12:06.809886 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" podUID="4669eb3c-24d5-4643-91d1-de96326757fa" containerName="route-controller-manager" containerID="cri-o://d2518e74c027a7fef75f3a28712197dbc5b4c4f39c226efe3d223cffcef991a2" gracePeriod=30 Jan 30 00:12:07 crc kubenswrapper[5113]: I0130 00:12:07.865600 5113 generic.go:358] "Generic (PLEG): container finished" podID="4669eb3c-24d5-4643-91d1-de96326757fa" containerID="d2518e74c027a7fef75f3a28712197dbc5b4c4f39c226efe3d223cffcef991a2" exitCode=0 Jan 30 00:12:07 crc kubenswrapper[5113]: I0130 00:12:07.865701 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" event={"ID":"4669eb3c-24d5-4643-91d1-de96326757fa","Type":"ContainerDied","Data":"d2518e74c027a7fef75f3a28712197dbc5b4c4f39c226efe3d223cffcef991a2"} Jan 30 00:12:07 crc kubenswrapper[5113]: I0130 00:12:07.869390 5113 generic.go:358] "Generic (PLEG): container finished" podID="ffde9a75-3edf-462f-af90-c312c4f05986" containerID="82396be9855059532497cebfeea008ef1c5c92af1e3c7a00558a4c6b91726aaf" exitCode=0 Jan 30 00:12:07 crc kubenswrapper[5113]: I0130 00:12:07.869511 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" event={"ID":"ffde9a75-3edf-462f-af90-c312c4f05986","Type":"ContainerDied","Data":"82396be9855059532497cebfeea008ef1c5c92af1e3c7a00558a4c6b91726aaf"} Jan 30 00:12:09 crc kubenswrapper[5113]: I0130 00:12:09.902449 5113 patch_prober.go:28] interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 30 00:12:09 crc kubenswrapper[5113]: 
I0130 00:12:09.903095 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 30 00:12:09 crc kubenswrapper[5113]: I0130 00:12:09.903178 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-747b44746d-kxkvb" Jan 30 00:12:09 crc kubenswrapper[5113]: I0130 00:12:09.903826 5113 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"b34fac1ea852daac9a9beb2f536c0f45f6338061f0bc624a045fd1bf07edd47d"} pod="openshift-console/downloads-747b44746d-kxkvb" containerMessage="Container download-server failed liveness probe, will be restarted" Jan 30 00:12:09 crc kubenswrapper[5113]: I0130 00:12:09.903897 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" containerID="cri-o://b34fac1ea852daac9a9beb2f536c0f45f6338061f0bc624a045fd1bf07edd47d" gracePeriod=2 Jan 30 00:12:09 crc kubenswrapper[5113]: I0130 00:12:09.904725 5113 patch_prober.go:28] interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 30 00:12:09 crc kubenswrapper[5113]: I0130 00:12:09.904821 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 30 00:12:10 crc kubenswrapper[5113]: I0130 00:12:10.897939 5113 generic.go:358] "Generic (PLEG): container finished" podID="d6483c17-196a-4e41-8950-46d60c5505c9" containerID="b34fac1ea852daac9a9beb2f536c0f45f6338061f0bc624a045fd1bf07edd47d" exitCode=0 Jan 30 00:12:10 crc kubenswrapper[5113]: I0130 00:12:10.898009 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-747b44746d-kxkvb" event={"ID":"d6483c17-196a-4e41-8950-46d60c5505c9","Type":"ContainerDied","Data":"b34fac1ea852daac9a9beb2f536c0f45f6338061f0bc624a045fd1bf07edd47d"} Jan 30 00:12:13 crc kubenswrapper[5113]: E0130 00:12:13.827970 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:13 crc kubenswrapper[5113]: E0130 00:12:13.830026 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:13 crc kubenswrapper[5113]: E0130 00:12:13.831231 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is 
stopping, stdout: , stderr: , exit code -1" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"] Jan 30 00:12:13 crc kubenswrapper[5113]: E0130 00:12:13.831293 5113 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" podUID="d245ee6c-4b68-41b6-b516-38a882666394" containerName="kube-multus-additional-cni-plugins" probeResult="unknown" Jan 30 00:12:13 crc kubenswrapper[5113]: I0130 00:12:13.881445 5113 patch_prober.go:28] interesting pod/controller-manager-65b6cccf98-8rbrn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Jan 30 00:12:13 crc kubenswrapper[5113]: I0130 00:12:13.881613 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" podUID="ffde9a75-3edf-462f-af90-c312c4f05986" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" Jan 30 00:12:15 crc kubenswrapper[5113]: I0130 00:12:15.181369 5113 patch_prober.go:28] interesting pod/route-controller-manager-776cdc94d6-99ddj container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 30 00:12:15 crc kubenswrapper[5113]: I0130 00:12:15.182209 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" podUID="4669eb3c-24d5-4643-91d1-de96326757fa" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 30 00:12:17 crc kubenswrapper[5113]: I0130 00:12:17.794658 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:12:18 crc kubenswrapper[5113]: I0130 00:12:18.958788 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-b5d9m_d245ee6c-4b68-41b6-b516-38a882666394/kube-multus-additional-cni-plugins/0.log" Jan 30 00:12:18 crc kubenswrapper[5113]: I0130 00:12:18.959271 5113 generic.go:358] "Generic (PLEG): container finished" podID="d245ee6c-4b68-41b6-b516-38a882666394" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" exitCode=137 Jan 30 00:12:18 crc kubenswrapper[5113]: I0130 00:12:18.959346 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" event={"ID":"d245ee6c-4b68-41b6-b516-38a882666394","Type":"ContainerDied","Data":"631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3"} Jan 30 00:12:19 crc kubenswrapper[5113]: I0130 00:12:19.906318 5113 patch_prober.go:28] interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 30 00:12:19 crc kubenswrapper[5113]: 
Jan 30 00:12:20 crc kubenswrapper[5113]: I0130 00:12:20.063096 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-77f986bd66-nvsp7"
Jan 30 00:12:23 crc kubenswrapper[5113]: E0130 00:12:23.825651 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3 is running failed: container process not found" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 30 00:12:23 crc kubenswrapper[5113]: E0130 00:12:23.826263 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3 is running failed: container process not found" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 30 00:12:23 crc kubenswrapper[5113]: E0130 00:12:23.826882 5113 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3 is running failed: container process not found" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3" cmd=["/bin/bash","-c","test -f /ready/ready"]
Jan 30 00:12:23 crc kubenswrapper[5113]: E0130 00:12:23.826919 5113 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3 is running failed: container process not found" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" podUID="d245ee6c-4b68-41b6-b516-38a882666394" containerName="kube-multus-additional-cni-plugins" probeResult="unknown"
Jan 30 00:12:23 crc kubenswrapper[5113]: I0130 00:12:23.881219 5113 patch_prober.go:28] interesting pod/controller-manager-65b6cccf98-8rbrn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body=
Jan 30 00:12:23 crc kubenswrapper[5113]: I0130 00:12:23.881323 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" podUID="ffde9a75-3edf-462f-af90-c312c4f05986" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.5:8443/healthz\": dial tcp 10.217.0.5:8443: connect: connection refused"
Jan 30 00:12:25 crc kubenswrapper[5113]: I0130 00:12:25.181932 5113 patch_prober.go:28] interesting pod/route-controller-manager-776cdc94d6-99ddj container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body=
Jan 30 00:12:25 crc kubenswrapper[5113]: I0130 00:12:25.182043 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" podUID="4669eb3c-24d5-4643-91d1-de96326757fa" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused"
Jan 30 00:12:26 crc kubenswrapper[5113]: I0130 00:12:26.132405 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-fhkjl"
Jan 30 00:12:26 crc kubenswrapper[5113]: I0130 00:12:26.723253 5113 ???:1] "http: TLS handshake error from 192.168.126.11:35742: no serving certificate available for the kubelet"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.177483 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-b5d9m_d245ee6c-4b68-41b6-b516-38a882666394/kube-multus-additional-cni-plugins/0.log"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.177859 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.182139 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.230213 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"]
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231020 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="08630e06-ed50-4792-97fb-e8d759867e2d" containerName="pruner"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231045 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="08630e06-ed50-4792-97fb-e8d759867e2d" containerName="pruner"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231080 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="d245ee6c-4b68-41b6-b516-38a882666394" containerName="kube-multus-additional-cni-plugins"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231088 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="d245ee6c-4b68-41b6-b516-38a882666394" containerName="kube-multus-additional-cni-plugins"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231103 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="4669eb3c-24d5-4643-91d1-de96326757fa" containerName="route-controller-manager"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231109 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="4669eb3c-24d5-4643-91d1-de96326757fa" containerName="route-controller-manager"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231122 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="aab16c0a-2713-42b2-b14a-c5da3434319a" containerName="pruner"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231128 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="aab16c0a-2713-42b2-b14a-c5da3434319a" containerName="pruner"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231255 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="aab16c0a-2713-42b2-b14a-c5da3434319a" containerName="pruner"
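The cni-sysctl-allowlist pod uses an exec readiness probe (cmd=["/bin/bash","-c","test -f /ready/ready"]). While the container is shutting down, CRI-O refuses to register new exec PIDs ("container is stopping"); once the process is gone, the same RPC returns NotFound, and the container's earlier exitCode=137 is 128 + 9, i.e. SIGKILL after the termination grace period. In both error cases the probe result is "unknown" rather than a clean failure, because the command never ran. A sketch of the exit-code mapping such an exec probe implies, assuming stdlib Go and running the command on the host rather than through the CRI ExecSync RPC:

```go
// exec_probe_sketch.go - what the exec readiness probe above amounts to:
// run a command and map its outcome. Illustrative only; the kubelet
// drives this via the CRI ExecSync RPC, not os/exec.
package main

import (
	"fmt"
	"os/exec"
)

func execProbe(argv []string) string {
	cmd := exec.Command(argv[0], argv[1:]...)
	if err := cmd.Run(); err != nil {
		if _, isExit := err.(*exec.ExitError); isExit {
			return "failure" // command ran but exited non-zero (file absent)
		}
		return "unknown" // could not run at all, like the RPC errors above
	}
	return "success"
}

func main() {
	fmt.Println(execProbe([]string{"/bin/bash", "-c", "test -f /ready/ready"}))
}
```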
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231266 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="d245ee6c-4b68-41b6-b516-38a882666394" containerName="kube-multus-additional-cni-plugins"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231274 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="08630e06-ed50-4792-97fb-e8d759867e2d" containerName="pruner"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.231288 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="4669eb3c-24d5-4643-91d1-de96326757fa" containerName="route-controller-manager"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.251627 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"]
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.251867 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.261995 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kl7n4\" (UniqueName: \"kubernetes.io/projected/d245ee6c-4b68-41b6-b516-38a882666394-kube-api-access-kl7n4\") pod \"d245ee6c-4b68-41b6-b516-38a882666394\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.262067 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/d245ee6c-4b68-41b6-b516-38a882666394-ready\") pod \"d245ee6c-4b68-41b6-b516-38a882666394\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.262137 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/d245ee6c-4b68-41b6-b516-38a882666394-cni-sysctl-allowlist\") pod \"d245ee6c-4b68-41b6-b516-38a882666394\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.262247 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/4669eb3c-24d5-4643-91d1-de96326757fa-tmp\") pod \"4669eb3c-24d5-4643-91d1-de96326757fa\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.262313 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4669eb3c-24d5-4643-91d1-de96326757fa-serving-cert\") pod \"4669eb3c-24d5-4643-91d1-de96326757fa\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.262343 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-config\") pod \"4669eb3c-24d5-4643-91d1-de96326757fa\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.262382 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d245ee6c-4b68-41b6-b516-38a882666394-tuning-conf-dir\") pod \"d245ee6c-4b68-41b6-b516-38a882666394\" (UID: \"d245ee6c-4b68-41b6-b516-38a882666394\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.262443 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbf8b\" (UniqueName: \"kubernetes.io/projected/4669eb3c-24d5-4643-91d1-de96326757fa-kube-api-access-xbf8b\") pod \"4669eb3c-24d5-4643-91d1-de96326757fa\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.262479 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-client-ca\") pod \"4669eb3c-24d5-4643-91d1-de96326757fa\" (UID: \"4669eb3c-24d5-4643-91d1-de96326757fa\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.262670 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d245ee6c-4b68-41b6-b516-38a882666394-ready" (OuterVolumeSpecName: "ready") pod "d245ee6c-4b68-41b6-b516-38a882666394" (UID: "d245ee6c-4b68-41b6-b516-38a882666394"). InnerVolumeSpecName "ready". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.263888 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-config" (OuterVolumeSpecName: "config") pod "4669eb3c-24d5-4643-91d1-de96326757fa" (UID: "4669eb3c-24d5-4643-91d1-de96326757fa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.263943 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d245ee6c-4b68-41b6-b516-38a882666394-tuning-conf-dir" (OuterVolumeSpecName: "tuning-conf-dir") pod "d245ee6c-4b68-41b6-b516-38a882666394" (UID: "d245ee6c-4b68-41b6-b516-38a882666394"). InnerVolumeSpecName "tuning-conf-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.269942 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4669eb3c-24d5-4643-91d1-de96326757fa-tmp" (OuterVolumeSpecName: "tmp") pod "4669eb3c-24d5-4643-91d1-de96326757fa" (UID: "4669eb3c-24d5-4643-91d1-de96326757fa"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.270729 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d245ee6c-4b68-41b6-b516-38a882666394-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "d245ee6c-4b68-41b6-b516-38a882666394" (UID: "d245ee6c-4b68-41b6-b516-38a882666394"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.271234 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-client-ca" (OuterVolumeSpecName: "client-ca") pod "4669eb3c-24d5-4643-91d1-de96326757fa" (UID: "4669eb3c-24d5-4643-91d1-de96326757fa"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.277904 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4669eb3c-24d5-4643-91d1-de96326757fa-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4669eb3c-24d5-4643-91d1-de96326757fa" (UID: "4669eb3c-24d5-4643-91d1-de96326757fa"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.278196 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d245ee6c-4b68-41b6-b516-38a882666394-kube-api-access-kl7n4" (OuterVolumeSpecName: "kube-api-access-kl7n4") pod "d245ee6c-4b68-41b6-b516-38a882666394" (UID: "d245ee6c-4b68-41b6-b516-38a882666394"). InnerVolumeSpecName "kube-api-access-kl7n4". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.287088 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4669eb3c-24d5-4643-91d1-de96326757fa-kube-api-access-xbf8b" (OuterVolumeSpecName: "kube-api-access-xbf8b") pod "4669eb3c-24d5-4643-91d1-de96326757fa" (UID: "4669eb3c-24d5-4643-91d1-de96326757fa"). InnerVolumeSpecName "kube-api-access-xbf8b". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.350840 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364217 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2be7a636-2fc6-4739-a559-a637553541ad-tmp\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364424 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-client-ca\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364504 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwfc7\" (UniqueName: \"kubernetes.io/projected/2be7a636-2fc6-4739-a559-a637553541ad-kube-api-access-xwfc7\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364582 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-config\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364640 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2be7a636-2fc6-4739-a559-a637553541ad-serving-cert\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364732 5113 reconciler_common.go:299] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/d245ee6c-4b68-41b6-b516-38a882666394-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364747 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/4669eb3c-24d5-4643-91d1-de96326757fa-tmp\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364762 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4669eb3c-24d5-4643-91d1-de96326757fa-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364777 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364790 5113 reconciler_common.go:299] "Volume detached for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/d245ee6c-4b68-41b6-b516-38a882666394-tuning-conf-dir\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364803 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-xbf8b\" (UniqueName: \"kubernetes.io/projected/4669eb3c-24d5-4643-91d1-de96326757fa-kube-api-access-xbf8b\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364815 5113 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4669eb3c-24d5-4643-91d1-de96326757fa-client-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364829 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-kl7n4\" (UniqueName: \"kubernetes.io/projected/d245ee6c-4b68-41b6-b516-38a882666394-kube-api-access-kl7n4\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.364841 5113 reconciler_common.go:299] "Volume detached for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/d245ee6c-4b68-41b6-b516-38a882666394-ready\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.386926 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"]
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.387638 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="ffde9a75-3edf-462f-af90-c312c4f05986" containerName="controller-manager"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.387652 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffde9a75-3edf-462f-af90-c312c4f05986" containerName="controller-manager"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.387795 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="ffde9a75-3edf-462f-af90-c312c4f05986" containerName="controller-manager"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.427029 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"]
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.427281 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.465465 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-config\") pod \"ffde9a75-3edf-462f-af90-c312c4f05986\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.465624 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffde9a75-3edf-462f-af90-c312c4f05986-serving-cert\") pod \"ffde9a75-3edf-462f-af90-c312c4f05986\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.465770 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ffde9a75-3edf-462f-af90-c312c4f05986-tmp\") pod \"ffde9a75-3edf-462f-af90-c312c4f05986\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.465832 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-client-ca\") pod \"ffde9a75-3edf-462f-af90-c312c4f05986\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.465860 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-proxy-ca-bundles\") pod \"ffde9a75-3edf-462f-af90-c312c4f05986\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.465892 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlbg8\" (UniqueName: \"kubernetes.io/projected/ffde9a75-3edf-462f-af90-c312c4f05986-kube-api-access-jlbg8\") pod \"ffde9a75-3edf-462f-af90-c312c4f05986\" (UID: \"ffde9a75-3edf-462f-af90-c312c4f05986\") "
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.466049 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2be7a636-2fc6-4739-a559-a637553541ad-serving-cert\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.466141 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2be7a636-2fc6-4739-a559-a637553541ad-tmp\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.466224 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-client-ca\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.466262 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-xwfc7\" (UniqueName: \"kubernetes.io/projected/2be7a636-2fc6-4739-a559-a637553541ad-kube-api-access-xwfc7\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.466298 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-config\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.468082 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-config" (OuterVolumeSpecName: "config") pod "ffde9a75-3edf-462f-af90-c312c4f05986" (UID: "ffde9a75-3edf-462f-af90-c312c4f05986"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.469404 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-config\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.469652 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffde9a75-3edf-462f-af90-c312c4f05986-tmp" (OuterVolumeSpecName: "tmp") pod "ffde9a75-3edf-462f-af90-c312c4f05986" (UID: "ffde9a75-3edf-462f-af90-c312c4f05986"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.470211 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-client-ca" (OuterVolumeSpecName: "client-ca") pod "ffde9a75-3edf-462f-af90-c312c4f05986" (UID: "ffde9a75-3edf-462f-af90-c312c4f05986"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.470699 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-client-ca\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.471102 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2be7a636-2fc6-4739-a559-a637553541ad-tmp\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.471554 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "ffde9a75-3edf-462f-af90-c312c4f05986" (UID: "ffde9a75-3edf-462f-af90-c312c4f05986"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.475383 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffde9a75-3edf-462f-af90-c312c4f05986-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ffde9a75-3edf-462f-af90-c312c4f05986" (UID: "ffde9a75-3edf-462f-af90-c312c4f05986"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.475901 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffde9a75-3edf-462f-af90-c312c4f05986-kube-api-access-jlbg8" (OuterVolumeSpecName: "kube-api-access-jlbg8") pod "ffde9a75-3edf-462f-af90-c312c4f05986" (UID: "ffde9a75-3edf-462f-af90-c312c4f05986"). InnerVolumeSpecName "kube-api-access-jlbg8". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.481147 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2be7a636-2fc6-4739-a559-a637553541ad-serving-cert\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.492972 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwfc7\" (UniqueName: \"kubernetes.io/projected/2be7a636-2fc6-4739-a559-a637553541ad-kube-api-access-xwfc7\") pod \"route-controller-manager-598bb7b487-b6l9r\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.567791 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-proxy-ca-bundles\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.567843 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb9w2\" (UniqueName: \"kubernetes.io/projected/d82500ff-9b92-4588-bfec-2df60e572d61-kube-api-access-zb9w2\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.567882 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-client-ca\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.567906 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d82500ff-9b92-4588-bfec-2df60e572d61-serving-cert\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.567924 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d82500ff-9b92-4588-bfec-2df60e572d61-tmp\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.568344 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-config\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.568733 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-config\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.568765 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ffde9a75-3edf-462f-af90-c312c4f05986-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.568780 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/ffde9a75-3edf-462f-af90-c312c4f05986-tmp\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.568798 5113 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-client-ca\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.568810 5113 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ffde9a75-3edf-462f-af90-c312c4f05986-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.568827 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-jlbg8\" (UniqueName: \"kubernetes.io/projected/ffde9a75-3edf-462f-af90-c312c4f05986-kube-api-access-jlbg8\") on node \"crc\" DevicePath \"\""
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.611726 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.670291 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-config\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.670638 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-proxy-ca-bundles\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.670675 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-zb9w2\" (UniqueName: \"kubernetes.io/projected/d82500ff-9b92-4588-bfec-2df60e572d61-kube-api-access-zb9w2\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.670724 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-client-ca\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.670748 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d82500ff-9b92-4588-bfec-2df60e572d61-serving-cert\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.670785 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d82500ff-9b92-4588-bfec-2df60e572d61-tmp\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.671677 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d82500ff-9b92-4588-bfec-2df60e572d61-tmp\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.672168 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-client-ca\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.672397 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-config\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.672503 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-proxy-ca-bundles\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.680243 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d82500ff-9b92-4588-bfec-2df60e572d61-serving-cert\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.696602 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb9w2\" (UniqueName: \"kubernetes.io/projected/d82500ff-9b92-4588-bfec-2df60e572d61-kube-api-access-zb9w2\") pod \"controller-manager-c6c5d6787-f7lmt\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:27 crc kubenswrapper[5113]: I0130 00:12:27.784671 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.041942 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-747b44746d-kxkvb" event={"ID":"d6483c17-196a-4e41-8950-46d60c5505c9","Type":"ContainerStarted","Data":"efba5d862548e590a6ea14a8c7dcd06d5a14e4c3c1af2b320374787309e626c3"}
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.042606 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-console/downloads-747b44746d-kxkvb"
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.042693 5113 patch_prober.go:28] interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.042729 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.050753 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvfsp" event={"ID":"84d95933-fc27-469e-91be-cb781299ccd2","Type":"ContainerStarted","Data":"0fe8f043b7da890fabd2f0313e23cd5e3dacf910e0aa04e141c9b4e15c045b0d"}
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.056414 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn" event={"ID":"ffde9a75-3edf-462f-af90-c312c4f05986","Type":"ContainerDied","Data":"c1c012b62bfe6b7488b4f703d5f3583c4f3a6737978bb60ce88d3828c929ea9f"}
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.056460 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.056471 5113 scope.go:117] "RemoveContainer" containerID="82396be9855059532497cebfeea008ef1c5c92af1e3c7a00558a4c6b91726aaf"
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.059362 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-znr9f" event={"ID":"2d28df1d-4619-45dc-8fee-b482cfad0ead","Type":"ContainerStarted","Data":"74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d"}
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.064284 5113 generic.go:358] "Generic (PLEG): container finished" podID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerID="de0a54b6b9f26cb97b7f475dcb22083112421174123bb8f78562d372be77cb31" exitCode=0
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.064402 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gv2cj" event={"ID":"a527979f-66df-45e5-a643-2c2ebc9fc7c6","Type":"ContainerDied","Data":"de0a54b6b9f26cb97b7f475dcb22083112421174123bb8f78562d372be77cb31"}
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.066492 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbzxk" event={"ID":"ed2eb5b7-1d01-4029-86f8-47a057a0352e","Type":"ContainerStarted","Data":"7791ae46c204e764134257a6adf200e96bb188bf0f3e36c075798fd5e83cb36b"}
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.079965 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"]
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.084907 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-b5d9m_d245ee6c-4b68-41b6-b516-38a882666394/kube-multus-additional-cni-plugins/0.log"
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.085191 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m"
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.086789 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-b5d9m" event={"ID":"d245ee6c-4b68-41b6-b516-38a882666394","Type":"ContainerDied","Data":"c49aca45d0e6e27d209612c675dc46485683c2b3053a6b6fb0e20dd090ffce1e"}
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.091380 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj" event={"ID":"4669eb3c-24d5-4643-91d1-de96326757fa","Type":"ContainerDied","Data":"3e06f21278ec0b235bf9c7f7f6616a93aafaa0df7b7635464108d8445eae9808"}
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.091430 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.098785 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hr22j" event={"ID":"6b49ee2f-63ad-4580-b29c-8ebcade15f14","Type":"ContainerStarted","Data":"8cc0b00eab75282eb4c2462f4f8ca60c31a5ee0dce6782039d29c47ae7a9de81"}
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.112685 5113 generic.go:358] "Generic (PLEG): container finished" podID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerID="e65c6e7da0ea05fcea9cacb34320708ce7f9797bb910335627a656791b2ab71a" exitCode=0
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.112819 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpzhr" event={"ID":"9f80d9f8-15a6-42da-bfff-15ae5c525dca","Type":"ContainerDied","Data":"e65c6e7da0ea05fcea9cacb34320708ce7f9797bb910335627a656791b2ab71a"}
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.269415 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"]
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.337317 5113 scope.go:117] "RemoveContainer" containerID="631eaa6cc5bc8e2062d47f6b775622416937cc890c3a1fbba2bcc7da8ceabed3"
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.446908 5113 scope.go:117] "RemoveContainer" containerID="d2518e74c027a7fef75f3a28712197dbc5b4c4f39c226efe3d223cffcef991a2"
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.457573 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"]
Jan 30 00:12:28 crc kubenswrapper[5113]: I0130 00:12:28.468318 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-65b6cccf98-8rbrn"]
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.125651 5113 generic.go:358] "Generic (PLEG): container finished" podID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" containerID="8cc0b00eab75282eb4c2462f4f8ca60c31a5ee0dce6782039d29c47ae7a9de81" exitCode=0
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.136358 5113 generic.go:358] "Generic (PLEG): container finished" podID="2d28df1d-4619-45dc-8fee-b482cfad0ead" containerID="74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d" exitCode=0
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.140152 5113 generic.go:358] "Generic (PLEG): container finished" podID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" containerID="7791ae46c204e764134257a6adf200e96bb188bf0f3e36c075798fd5e83cb36b" exitCode=0
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.760854 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"]
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.760927 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hr22j" event={"ID":"6b49ee2f-63ad-4580-b29c-8ebcade15f14","Type":"ContainerDied","Data":"8cc0b00eab75282eb4c2462f4f8ca60c31a5ee0dce6782039d29c47ae7a9de81"}
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.760983 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-776cdc94d6-99ddj"]
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.761016 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-b5d9m"]
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.761039 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r" event={"ID":"2be7a636-2fc6-4739-a559-a637553541ad","Type":"ContainerStarted","Data":"5e3ebe73514aa5855b6b11c4e3d6ea9c8f31e47cd50b8c7c2264f818ea284162"}
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.761074 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-b5d9m"]
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.762255 5113 patch_prober.go:28] interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.762360 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.799247 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4669eb3c-24d5-4643-91d1-de96326757fa" path="/var/lib/kubelet/pods/4669eb3c-24d5-4643-91d1-de96326757fa/volumes"
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.800495 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d245ee6c-4b68-41b6-b516-38a882666394" path="/var/lib/kubelet/pods/d245ee6c-4b68-41b6-b516-38a882666394/volumes"
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.801607 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffde9a75-3edf-462f-af90-c312c4f05986" path="/var/lib/kubelet/pods/ffde9a75-3edf-462f-af90-c312c4f05986/volumes"
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.803247 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt" event={"ID":"d82500ff-9b92-4588-bfec-2df60e572d61","Type":"ContainerStarted","Data":"fc99f1e3173b957fb47f320669194e250d669d8b28d53791b95388557aab930c"}
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.803521 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-znr9f" event={"ID":"2d28df1d-4619-45dc-8fee-b482cfad0ead","Type":"ContainerDied","Data":"74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d"}
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.803576 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbzxk" event={"ID":"ed2eb5b7-1d01-4029-86f8-47a057a0352e","Type":"ContainerDied","Data":"7791ae46c204e764134257a6adf200e96bb188bf0f3e36c075798fd5e83cb36b"}
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.803601 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-12-crc"]
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.902959 5113 patch_prober.go:28] interesting pod/downloads-747b44746d-kxkvb container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 30 00:12:29 crc kubenswrapper[5113]: I0130 00:12:29.903424 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-747b44746d-kxkvb" podUID="d6483c17-196a-4e41-8950-46d60c5505c9" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 30 00:12:30 crc kubenswrapper[5113]: I0130 00:12:30.808747 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-12-crc"]
Jan 30 00:12:30 crc kubenswrapper[5113]: I0130 00:12:30.809180 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-12-crc"
Jan 30 00:12:30 crc kubenswrapper[5113]: I0130 00:12:30.814869 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver\"/\"kube-root-ca.crt\""
Jan 30 00:12:30 crc kubenswrapper[5113]: I0130 00:12:30.816269 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver\"/\"installer-sa-dockercfg-bqqnb\""
Jan 30 00:12:30 crc kubenswrapper[5113]: I0130 00:12:30.823092 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgdzx" event={"ID":"b8b3ed2e-5058-4d4f-ba3a-e1a083531407","Type":"ContainerStarted","Data":"e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c"}
Jan 30 00:12:30 crc kubenswrapper[5113]: I0130 00:12:30.823135 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r" event={"ID":"2be7a636-2fc6-4739-a559-a637553541ad","Type":"ContainerStarted","Data":"ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034"}
Jan 30 00:12:30 crc kubenswrapper[5113]: I0130 00:12:30.823149 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gv2cj" event={"ID":"a527979f-66df-45e5-a643-2c2ebc9fc7c6","Type":"ContainerStarted","Data":"dd7b1883507c1da7da97c2a27d6549d626755ed1da21a9abdf3daf74ba3c5d86"}
Jan 30 00:12:30 crc kubenswrapper[5113]: I0130 00:12:30.823165 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vjvch" event={"ID":"5d9cf73a-1c55-49b9-9664-393fd1ea11ec","Type":"ContainerStarted","Data":"03a83c31fd5983a828dd0510903bb4b35996bc449640d20b1881e369ff5553f8"}
Jan 30 00:12:30 crc kubenswrapper[5113]: I0130 00:12:30.823175 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hr22j" event={"ID":"6b49ee2f-63ad-4580-b29c-8ebcade15f14","Type":"ContainerStarted","Data":"a89fbaaf3ca10e10c3e1a2ff3fec9623dabcc14884462984559b9881c07500f7"}
Jan 30 00:12:30 crc kubenswrapper[5113]: I0130 00:12:30.955143 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc8879ad-79d6-4006-8b73-2a881b742008-kube-api-access\") pod \"revision-pruner-12-crc\" (UID: \"dc8879ad-79d6-4006-8b73-2a881b742008\") " pod="openshift-kube-apiserver/revision-pruner-12-crc"
Jan 30 00:12:30 crc kubenswrapper[5113]: I0130 00:12:30.955195 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc8879ad-79d6-4006-8b73-2a881b742008-kubelet-dir\") pod \"revision-pruner-12-crc\" (UID: \"dc8879ad-79d6-4006-8b73-2a881b742008\") " pod="openshift-kube-apiserver/revision-pruner-12-crc"
Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.056667 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc8879ad-79d6-4006-8b73-2a881b742008-kube-api-access\") pod \"revision-pruner-12-crc\" (UID: \"dc8879ad-79d6-4006-8b73-2a881b742008\") " pod="openshift-kube-apiserver/revision-pruner-12-crc"
Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.056742 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc8879ad-79d6-4006-8b73-2a881b742008-kubelet-dir\") pod \"revision-pruner-12-crc\" (UID: \"dc8879ad-79d6-4006-8b73-2a881b742008\") " pod="openshift-kube-apiserver/revision-pruner-12-crc"
Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.058396 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc8879ad-79d6-4006-8b73-2a881b742008-kubelet-dir\") pod \"revision-pruner-12-crc\" (UID: \"dc8879ad-79d6-4006-8b73-2a881b742008\") " pod="openshift-kube-apiserver/revision-pruner-12-crc"
Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.104936 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc8879ad-79d6-4006-8b73-2a881b742008-kube-api-access\") pod \"revision-pruner-12-crc\" (UID: \"dc8879ad-79d6-4006-8b73-2a881b742008\") " pod="openshift-kube-apiserver/revision-pruner-12-crc"
Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.131446 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-12-crc"
Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.229712 5113 generic.go:358] "Generic (PLEG): container finished" podID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerID="e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c" exitCode=0
Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.229867 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgdzx" event={"ID":"b8b3ed2e-5058-4d4f-ba3a-e1a083531407","Type":"ContainerDied","Data":"e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c"}
Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.237804 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpzhr" event={"ID":"9f80d9f8-15a6-42da-bfff-15ae5c525dca","Type":"ContainerStarted","Data":"157db1ad206fcec46c294ad298a9e43c22ead217ae8be09548b063062748f790"}
Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.252800 5113 generic.go:358] "Generic (PLEG): container finished" podID="84d95933-fc27-469e-91be-cb781299ccd2" containerID="0fe8f043b7da890fabd2f0313e23cd5e3dacf910e0aa04e141c9b4e15c045b0d" exitCode=0
Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.254059 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvfsp" event={"ID":"84d95933-fc27-469e-91be-cb781299ccd2","Type":"ContainerDied","Data":"0fe8f043b7da890fabd2f0313e23cd5e3dacf910e0aa04e141c9b4e15c045b0d"}
Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.261798 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt" event={"ID":"d82500ff-9b92-4588-bfec-2df60e572d61","Type":"ContainerStarted","Data":"503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796"}
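The reflector "Caches populated" lines show the kubelet's watch-based caches for the ConfigMap and Secret the new revision-pruner-12-crc pod references filling before volume setup proceeds. The same wait-for-sync pattern, sketched with client-go; this illustrates the generic informer idiom, not the kubelet's exact object-cache mechanism, and error handling is trimmed:

```go
// informer_sketch.go - the wait-for-cache-sync pattern behind the
// "Caches populated" lines, sketched with client-go. Assumes it runs
// in-cluster; errors are ignored for brevity.
package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, _ := rest.InClusterConfig()
	cs, _ := kubernetes.NewForConfig(cfg)

	factory := informers.NewSharedInformerFactory(cs, 30*time.Second)
	secrets := factory.Core().V1().Secrets().Informer()
	configMaps := factory.Core().V1().ConfigMaps().Informer()

	stop := make(chan struct{})
	factory.Start(stop)
	// Blocks until the initial LIST has populated the local caches,
	// analogous to the reflector "Caches populated" messages above.
	cache.WaitForCacheSync(stop, secrets.HasSynced, configMaps.HasSynced)
}
```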
00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.268459 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-znr9f" event={"ID":"2d28df1d-4619-45dc-8fee-b482cfad0ead","Type":"ContainerStarted","Data":"2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4"} Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.491641 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt" Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.492394 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r" Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.497140 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-12-crc"] Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.501894 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r" Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.525110 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lpzhr" podStartSLOduration=8.391515296 podStartE2EDuration="39.525086638s" podCreationTimestamp="2026-01-30 00:11:52 +0000 UTC" firstStartedPulling="2026-01-30 00:11:55.626922734 +0000 UTC m=+135.699528111" lastFinishedPulling="2026-01-30 00:12:26.760494076 +0000 UTC m=+166.833099453" observedRunningTime="2026-01-30 00:12:31.516647045 +0000 UTC m=+171.589252452" watchObservedRunningTime="2026-01-30 00:12:31.525086638 +0000 UTC m=+171.597692005" Jan 30 00:12:31 crc kubenswrapper[5113]: W0130 00:12:31.535570 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poddc8879ad_79d6_4006_8b73_2a881b742008.slice/crio-4d6502cf3bf94a1d35391af4282e64522009cd99d2e31f8493c0d14836ee0380 WatchSource:0}: Error finding container 4d6502cf3bf94a1d35391af4282e64522009cd99d2e31f8493c0d14836ee0380: Status 404 returned error can't find the container with id 4d6502cf3bf94a1d35391af4282e64522009cd99d2e31f8493c0d14836ee0380 Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.547701 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-znr9f" podStartSLOduration=6.278316122 podStartE2EDuration="41.547547266s" podCreationTimestamp="2026-01-30 00:11:50 +0000 UTC" firstStartedPulling="2026-01-30 00:11:52.085855274 +0000 UTC m=+132.158460651" lastFinishedPulling="2026-01-30 00:12:27.355086418 +0000 UTC m=+167.427691795" observedRunningTime="2026-01-30 00:12:31.538428912 +0000 UTC m=+171.611034289" watchObservedRunningTime="2026-01-30 00:12:31.547547266 +0000 UTC m=+171.620152643" Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.561869 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hr22j" podStartSLOduration=7.682894456 podStartE2EDuration="41.561848361s" podCreationTimestamp="2026-01-30 00:11:50 +0000 UTC" firstStartedPulling="2026-01-30 00:11:53.322658239 +0000 UTC m=+133.395263616" lastFinishedPulling="2026-01-30 00:12:27.201612134 +0000 UTC m=+167.274217521" observedRunningTime="2026-01-30 00:12:31.556200905 +0000 UTC m=+171.628806282" watchObservedRunningTime="2026-01-30 00:12:31.561848361 +0000 UTC m=+171.634453738" Jan 30 
00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.586292 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gv2cj" podStartSLOduration=8.525053299 podStartE2EDuration="39.5862667s" podCreationTimestamp="2026-01-30 00:11:52 +0000 UTC" firstStartedPulling="2026-01-30 00:11:55.699245003 +0000 UTC m=+135.771850370" lastFinishedPulling="2026-01-30 00:12:26.760458394 +0000 UTC m=+166.833063771" observedRunningTime="2026-01-30 00:12:31.583950628 +0000 UTC m=+171.656556025" watchObservedRunningTime="2026-01-30 00:12:31.5862667 +0000 UTC m=+171.658872077" Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.607431 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt" podStartSLOduration=5.607405118 podStartE2EDuration="5.607405118s" podCreationTimestamp="2026-01-30 00:12:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:31.602388671 +0000 UTC m=+171.674994048" watchObservedRunningTime="2026-01-30 00:12:31.607405118 +0000 UTC m=+171.680010505" Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.638756 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r" podStartSLOduration=5.638727662 podStartE2EDuration="5.638727662s" podCreationTimestamp="2026-01-30 00:12:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:31.634585953 +0000 UTC m=+171.707191340" watchObservedRunningTime="2026-01-30 00:12:31.638727662 +0000 UTC m=+171.711333039" Jan 30 00:12:31 crc kubenswrapper[5113]: I0130 00:12:31.640830 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt" Jan 30 00:12:32 crc kubenswrapper[5113]: I0130 00:12:32.277452 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvfsp" event={"ID":"84d95933-fc27-469e-91be-cb781299ccd2","Type":"ContainerStarted","Data":"73ea884e064974e952bf9033ecb3e45f1395565f2db1ec0617c3e70ef8bcbba1"} Jan 30 00:12:32 crc kubenswrapper[5113]: I0130 00:12:32.279985 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-12-crc" event={"ID":"dc8879ad-79d6-4006-8b73-2a881b742008","Type":"ContainerStarted","Data":"4d6502cf3bf94a1d35391af4282e64522009cd99d2e31f8493c0d14836ee0380"} Jan 30 00:12:32 crc kubenswrapper[5113]: I0130 00:12:32.281625 5113 generic.go:358] "Generic (PLEG): container finished" podID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" containerID="03a83c31fd5983a828dd0510903bb4b35996bc449640d20b1881e369ff5553f8" exitCode=0 Jan 30 00:12:32 crc kubenswrapper[5113]: I0130 00:12:32.281708 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vjvch" event={"ID":"5d9cf73a-1c55-49b9-9664-393fd1ea11ec","Type":"ContainerDied","Data":"03a83c31fd5983a828dd0510903bb4b35996bc449640d20b1881e369ff5553f8"} Jan 30 00:12:32 crc kubenswrapper[5113]: I0130 00:12:32.288641 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbzxk" 
event={"ID":"ed2eb5b7-1d01-4029-86f8-47a057a0352e","Type":"ContainerStarted","Data":"f957aef456f39457593654e907c136da07d322fd6c11eb3666f083900e4edb1e"} Jan 30 00:12:32 crc kubenswrapper[5113]: I0130 00:12:32.291808 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgdzx" event={"ID":"b8b3ed2e-5058-4d4f-ba3a-e1a083531407","Type":"ContainerStarted","Data":"7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8"} Jan 30 00:12:32 crc kubenswrapper[5113]: I0130 00:12:32.340240 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tvfsp" podStartSLOduration=7.618163815 podStartE2EDuration="39.340216499s" podCreationTimestamp="2026-01-30 00:11:53 +0000 UTC" firstStartedPulling="2026-01-30 00:11:55.633451866 +0000 UTC m=+135.706057243" lastFinishedPulling="2026-01-30 00:12:27.35550455 +0000 UTC m=+167.428109927" observedRunningTime="2026-01-30 00:12:32.311796145 +0000 UTC m=+172.384401522" watchObservedRunningTime="2026-01-30 00:12:32.340216499 +0000 UTC m=+172.412821876" Jan 30 00:12:32 crc kubenswrapper[5113]: I0130 00:12:32.362723 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dgdzx" podStartSLOduration=8.685232331 podStartE2EDuration="39.362696658s" podCreationTimestamp="2026-01-30 00:11:53 +0000 UTC" firstStartedPulling="2026-01-30 00:11:56.736631196 +0000 UTC m=+136.809236573" lastFinishedPulling="2026-01-30 00:12:27.414095523 +0000 UTC m=+167.486700900" observedRunningTime="2026-01-30 00:12:32.361122379 +0000 UTC m=+172.433727776" watchObservedRunningTime="2026-01-30 00:12:32.362696658 +0000 UTC m=+172.435302035" Jan 30 00:12:32 crc kubenswrapper[5113]: I0130 00:12:32.396234 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rbzxk" podStartSLOduration=8.437937788 podStartE2EDuration="42.39620452s" podCreationTimestamp="2026-01-30 00:11:50 +0000 UTC" firstStartedPulling="2026-01-30 00:11:53.243400854 +0000 UTC m=+133.316006231" lastFinishedPulling="2026-01-30 00:12:27.201667586 +0000 UTC m=+167.274272963" observedRunningTime="2026-01-30 00:12:32.391757011 +0000 UTC m=+172.464362388" watchObservedRunningTime="2026-01-30 00:12:32.39620452 +0000 UTC m=+172.468809897" Jan 30 00:12:32 crc kubenswrapper[5113]: I0130 00:12:32.649062 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:12:32 crc kubenswrapper[5113]: I0130 00:12:32.649127 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:12:33 crc kubenswrapper[5113]: I0130 00:12:33.302548 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-12-crc" event={"ID":"dc8879ad-79d6-4006-8b73-2a881b742008","Type":"ContainerStarted","Data":"ea623bc65403e579ec3fc7d85f2cbdeca0f5e3fe1efb83681ff22f4fa09cbbee"} Jan 30 00:12:33 crc kubenswrapper[5113]: I0130 00:12:33.325495 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-12-crc" podStartSLOduration=4.325462011 podStartE2EDuration="4.325462011s" podCreationTimestamp="2026-01-30 00:12:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:33.325144461 +0000 UTC 
m=+173.397749838" watchObservedRunningTime="2026-01-30 00:12:33.325462011 +0000 UTC m=+173.398067388" Jan 30 00:12:33 crc kubenswrapper[5113]: I0130 00:12:33.364312 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:12:33 crc kubenswrapper[5113]: I0130 00:12:33.364436 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:12:33 crc kubenswrapper[5113]: I0130 00:12:33.861279 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:12:33 crc kubenswrapper[5113]: I0130 00:12:33.861551 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:12:34 crc kubenswrapper[5113]: I0130 00:12:34.158007 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:12:34 crc kubenswrapper[5113]: I0130 00:12:34.158096 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:12:34 crc kubenswrapper[5113]: I0130 00:12:34.318226 5113 generic.go:358] "Generic (PLEG): container finished" podID="dc8879ad-79d6-4006-8b73-2a881b742008" containerID="ea623bc65403e579ec3fc7d85f2cbdeca0f5e3fe1efb83681ff22f4fa09cbbee" exitCode=0 Jan 30 00:12:34 crc kubenswrapper[5113]: I0130 00:12:34.318349 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-12-crc" event={"ID":"dc8879ad-79d6-4006-8b73-2a881b742008","Type":"ContainerDied","Data":"ea623bc65403e579ec3fc7d85f2cbdeca0f5e3fe1efb83681ff22f4fa09cbbee"} Jan 30 00:12:34 crc kubenswrapper[5113]: I0130 00:12:34.321727 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vjvch" event={"ID":"5d9cf73a-1c55-49b9-9664-393fd1ea11ec","Type":"ContainerStarted","Data":"d117539f67b22b07be1f2b4407532487e5ad8c9ad46acc6b7d53dd8e33267b3b"} Jan 30 00:12:34 crc kubenswrapper[5113]: I0130 00:12:34.354459 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vjvch" podStartSLOduration=9.106242043 podStartE2EDuration="44.354437823s" podCreationTimestamp="2026-01-30 00:11:50 +0000 UTC" firstStartedPulling="2026-01-30 00:11:52.107729814 +0000 UTC m=+132.180335201" lastFinishedPulling="2026-01-30 00:12:27.355925604 +0000 UTC m=+167.428530981" observedRunningTime="2026-01-30 00:12:34.352188973 +0000 UTC m=+174.424794360" watchObservedRunningTime="2026-01-30 00:12:34.354437823 +0000 UTC m=+174.427043200" Jan 30 00:12:34 crc kubenswrapper[5113]: I0130 00:12:34.482174 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-lpzhr" podUID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerName="registry-server" probeResult="failure" output=< Jan 30 00:12:34 crc kubenswrapper[5113]: timeout: failed to connect service ":50051" within 1s Jan 30 00:12:34 crc kubenswrapper[5113]: > Jan 30 00:12:34 crc kubenswrapper[5113]: I0130 00:12:34.486422 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-gv2cj" podUID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerName="registry-server" probeResult="failure" output=< Jan 30 00:12:34 crc kubenswrapper[5113]: timeout: failed to connect 
service ":50051" within 1s Jan 30 00:12:34 crc kubenswrapper[5113]: > Jan 30 00:12:34 crc kubenswrapper[5113]: I0130 00:12:34.930962 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-tvfsp" podUID="84d95933-fc27-469e-91be-cb781299ccd2" containerName="registry-server" probeResult="failure" output=< Jan 30 00:12:34 crc kubenswrapper[5113]: timeout: failed to connect service ":50051" within 1s Jan 30 00:12:34 crc kubenswrapper[5113]: > Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.199800 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dgdzx" podUID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerName="registry-server" probeResult="failure" output=< Jan 30 00:12:35 crc kubenswrapper[5113]: timeout: failed to connect service ":50051" within 1s Jan 30 00:12:35 crc kubenswrapper[5113]: > Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.375120 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-12-crc"] Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.644714 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-12-crc"] Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.645036 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.651908 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.746062 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc8879ad-79d6-4006-8b73-2a881b742008-kubelet-dir\") pod \"dc8879ad-79d6-4006-8b73-2a881b742008\" (UID: \"dc8879ad-79d6-4006-8b73-2a881b742008\") " Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.746123 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc8879ad-79d6-4006-8b73-2a881b742008-kube-api-access\") pod \"dc8879ad-79d6-4006-8b73-2a881b742008\" (UID: \"dc8879ad-79d6-4006-8b73-2a881b742008\") " Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.746219 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dc8879ad-79d6-4006-8b73-2a881b742008-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "dc8879ad-79d6-4006-8b73-2a881b742008" (UID: "dc8879ad-79d6-4006-8b73-2a881b742008"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.746568 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-var-lock\") pod \"installer-12-crc\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.746662 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ab098a6-e349-4f74-b76f-e0ff9261030e-kube-api-access\") pod \"installer-12-crc\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.746720 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-kubelet-dir\") pod \"installer-12-crc\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.746800 5113 reconciler_common.go:299] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc8879ad-79d6-4006-8b73-2a881b742008-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.756071 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc8879ad-79d6-4006-8b73-2a881b742008-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "dc8879ad-79d6-4006-8b73-2a881b742008" (UID: "dc8879ad-79d6-4006-8b73-2a881b742008"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.849036 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-kubelet-dir\") pod \"installer-12-crc\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.849110 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-var-lock\") pod \"installer-12-crc\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.849172 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ab098a6-e349-4f74-b76f-e0ff9261030e-kube-api-access\") pod \"installer-12-crc\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.849227 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc8879ad-79d6-4006-8b73-2a881b742008-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.849262 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-kubelet-dir\") pod \"installer-12-crc\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.849360 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-var-lock\") pod \"installer-12-crc\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.870979 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ab098a6-e349-4f74-b76f-e0ff9261030e-kube-api-access\") pod \"installer-12-crc\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:12:35 crc kubenswrapper[5113]: I0130 00:12:35.967768 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:12:36 crc kubenswrapper[5113]: I0130 00:12:36.345506 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-12-crc" Jan 30 00:12:36 crc kubenswrapper[5113]: I0130 00:12:36.345487 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-12-crc" event={"ID":"dc8879ad-79d6-4006-8b73-2a881b742008","Type":"ContainerDied","Data":"4d6502cf3bf94a1d35391af4282e64522009cd99d2e31f8493c0d14836ee0380"} Jan 30 00:12:36 crc kubenswrapper[5113]: I0130 00:12:36.346176 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d6502cf3bf94a1d35391af4282e64522009cd99d2e31f8493c0d14836ee0380" Jan 30 00:12:36 crc kubenswrapper[5113]: W0130 00:12:36.550795 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod6ab098a6_e349_4f74_b76f_e0ff9261030e.slice/crio-99d5e969bbbded82782b610f8cc36f4965ea303364f692864ae6655a7e903ad1 WatchSource:0}: Error finding container 99d5e969bbbded82782b610f8cc36f4965ea303364f692864ae6655a7e903ad1: Status 404 returned error can't find the container with id 99d5e969bbbded82782b610f8cc36f4965ea303364f692864ae6655a7e903ad1 Jan 30 00:12:36 crc kubenswrapper[5113]: I0130 00:12:36.551338 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-12-crc"] Jan 30 00:12:37 crc kubenswrapper[5113]: I0130 00:12:37.354936 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-12-crc" event={"ID":"6ab098a6-e349-4f74-b76f-e0ff9261030e","Type":"ContainerStarted","Data":"99d5e969bbbded82782b610f8cc36f4965ea303364f692864ae6655a7e903ad1"} Jan 30 00:12:38 crc kubenswrapper[5113]: I0130 00:12:38.363905 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-12-crc" event={"ID":"6ab098a6-e349-4f74-b76f-e0ff9261030e","Type":"ContainerStarted","Data":"906091be34327e7619ec031d5a6d060ef1fae14147a318294d1611733604af7e"} Jan 30 00:12:39 crc kubenswrapper[5113]: I0130 00:12:39.769827 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-747b44746d-kxkvb" Jan 30 00:12:40 crc kubenswrapper[5113]: I0130 00:12:40.410626 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-12-crc" podStartSLOduration=5.410595135 podStartE2EDuration="5.410595135s" podCreationTimestamp="2026-01-30 00:12:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:12:40.407146498 +0000 UTC m=+180.479751885" watchObservedRunningTime="2026-01-30 00:12:40.410595135 +0000 UTC m=+180.483200552" Jan 30 00:12:40 crc kubenswrapper[5113]: I0130 00:12:40.463593 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-znr9f" Jan 30 00:12:40 crc kubenswrapper[5113]: I0130 00:12:40.463693 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/certified-operators-znr9f" Jan 30 00:12:40 crc kubenswrapper[5113]: I0130 00:12:40.532225 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-znr9f" Jan 30 00:12:40 crc kubenswrapper[5113]: I0130 00:12:40.643242 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/community-operators-vjvch" Jan 30 00:12:40 crc kubenswrapper[5113]: I0130 00:12:40.643517 5113 kubelet.go:2658] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vjvch" Jan 30 00:12:40 crc kubenswrapper[5113]: I0130 00:12:40.687957 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vjvch" Jan 30 00:12:40 crc kubenswrapper[5113]: I0130 00:12:40.887169 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hr22j" Jan 30 00:12:40 crc kubenswrapper[5113]: I0130 00:12:40.887261 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/certified-operators-hr22j" Jan 30 00:12:40 crc kubenswrapper[5113]: I0130 00:12:40.949589 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hr22j" Jan 30 00:12:41 crc kubenswrapper[5113]: I0130 00:12:41.347973 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/community-operators-rbzxk" Jan 30 00:12:41 crc kubenswrapper[5113]: I0130 00:12:41.348227 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rbzxk" Jan 30 00:12:41 crc kubenswrapper[5113]: I0130 00:12:41.422387 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rbzxk" Jan 30 00:12:41 crc kubenswrapper[5113]: I0130 00:12:41.458397 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hr22j" Jan 30 00:12:41 crc kubenswrapper[5113]: I0130 00:12:41.465283 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vjvch" Jan 30 00:12:41 crc kubenswrapper[5113]: I0130 00:12:41.471193 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-znr9f" Jan 30 00:12:41 crc kubenswrapper[5113]: I0130 00:12:41.976285 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hr22j"] Jan 30 00:12:42 crc kubenswrapper[5113]: I0130 00:12:42.467134 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rbzxk" Jan 30 00:12:42 crc kubenswrapper[5113]: I0130 00:12:42.711219 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:12:42 crc kubenswrapper[5113]: I0130 00:12:42.784921 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:12:43 crc kubenswrapper[5113]: I0130 00:12:43.409884 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hr22j" podUID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" containerName="registry-server" containerID="cri-o://a89fbaaf3ca10e10c3e1a2ff3fec9623dabcc14884462984559b9881c07500f7" gracePeriod=2 Jan 30 00:12:43 crc kubenswrapper[5113]: I0130 00:12:43.420089 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:12:43 crc kubenswrapper[5113]: I0130 00:12:43.479980 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:12:43 crc 
kubenswrapper[5113]: I0130 00:12:43.784119 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rbzxk"] Jan 30 00:12:43 crc kubenswrapper[5113]: I0130 00:12:43.901753 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:12:43 crc kubenswrapper[5113]: I0130 00:12:43.946469 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:12:44 crc kubenswrapper[5113]: I0130 00:12:44.202605 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:12:44 crc kubenswrapper[5113]: I0130 00:12:44.242064 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:12:44 crc kubenswrapper[5113]: I0130 00:12:44.415690 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rbzxk" podUID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" containerName="registry-server" containerID="cri-o://f957aef456f39457593654e907c136da07d322fd6c11eb3666f083900e4edb1e" gracePeriod=2 Jan 30 00:12:45 crc kubenswrapper[5113]: I0130 00:12:45.432630 5113 generic.go:358] "Generic (PLEG): container finished" podID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" containerID="a89fbaaf3ca10e10c3e1a2ff3fec9623dabcc14884462984559b9881c07500f7" exitCode=0 Jan 30 00:12:45 crc kubenswrapper[5113]: I0130 00:12:45.432722 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hr22j" event={"ID":"6b49ee2f-63ad-4580-b29c-8ebcade15f14","Type":"ContainerDied","Data":"a89fbaaf3ca10e10c3e1a2ff3fec9623dabcc14884462984559b9881c07500f7"} Jan 30 00:12:45 crc kubenswrapper[5113]: I0130 00:12:45.442647 5113 generic.go:358] "Generic (PLEG): container finished" podID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" containerID="f957aef456f39457593654e907c136da07d322fd6c11eb3666f083900e4edb1e" exitCode=0 Jan 30 00:12:45 crc kubenswrapper[5113]: I0130 00:12:45.442736 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbzxk" event={"ID":"ed2eb5b7-1d01-4029-86f8-47a057a0352e","Type":"ContainerDied","Data":"f957aef456f39457593654e907c136da07d322fd6c11eb3666f083900e4edb1e"} Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.015641 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hr22j" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.094294 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rbzxk" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.162615 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-utilities\") pod \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.162687 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-catalog-content\") pod \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.162743 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qrqf2\" (UniqueName: \"kubernetes.io/projected/ed2eb5b7-1d01-4029-86f8-47a057a0352e-kube-api-access-qrqf2\") pod \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.162834 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mth44\" (UniqueName: \"kubernetes.io/projected/6b49ee2f-63ad-4580-b29c-8ebcade15f14-kube-api-access-mth44\") pod \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\" (UID: \"6b49ee2f-63ad-4580-b29c-8ebcade15f14\") " Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.162918 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-utilities\") pod \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.162966 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-catalog-content\") pod \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\" (UID: \"ed2eb5b7-1d01-4029-86f8-47a057a0352e\") " Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.164639 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-utilities" (OuterVolumeSpecName: "utilities") pod "6b49ee2f-63ad-4580-b29c-8ebcade15f14" (UID: "6b49ee2f-63ad-4580-b29c-8ebcade15f14"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.166482 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-utilities" (OuterVolumeSpecName: "utilities") pod "ed2eb5b7-1d01-4029-86f8-47a057a0352e" (UID: "ed2eb5b7-1d01-4029-86f8-47a057a0352e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.170859 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed2eb5b7-1d01-4029-86f8-47a057a0352e-kube-api-access-qrqf2" (OuterVolumeSpecName: "kube-api-access-qrqf2") pod "ed2eb5b7-1d01-4029-86f8-47a057a0352e" (UID: "ed2eb5b7-1d01-4029-86f8-47a057a0352e"). InnerVolumeSpecName "kube-api-access-qrqf2". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.174355 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b49ee2f-63ad-4580-b29c-8ebcade15f14-kube-api-access-mth44" (OuterVolumeSpecName: "kube-api-access-mth44") pod "6b49ee2f-63ad-4580-b29c-8ebcade15f14" (UID: "6b49ee2f-63ad-4580-b29c-8ebcade15f14"). InnerVolumeSpecName "kube-api-access-mth44". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.180257 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpzhr"] Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.180711 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lpzhr" podUID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerName="registry-server" containerID="cri-o://157db1ad206fcec46c294ad298a9e43c22ead217ae8be09548b063062748f790" gracePeriod=2 Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.212359 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6b49ee2f-63ad-4580-b29c-8ebcade15f14" (UID: "6b49ee2f-63ad-4580-b29c-8ebcade15f14"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.231907 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ed2eb5b7-1d01-4029-86f8-47a057a0352e" (UID: "ed2eb5b7-1d01-4029-86f8-47a057a0352e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.265589 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.266048 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed2eb5b7-1d01-4029-86f8-47a057a0352e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.266178 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.266294 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b49ee2f-63ad-4580-b29c-8ebcade15f14-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.266431 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-qrqf2\" (UniqueName: \"kubernetes.io/projected/ed2eb5b7-1d01-4029-86f8-47a057a0352e-kube-api-access-qrqf2\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.266580 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-mth44\" (UniqueName: \"kubernetes.io/projected/6b49ee2f-63ad-4580-b29c-8ebcade15f14-kube-api-access-mth44\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.454221 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbzxk" event={"ID":"ed2eb5b7-1d01-4029-86f8-47a057a0352e","Type":"ContainerDied","Data":"8f908eac179a11bd10cc8b711796baaa741868e2fd3d2a6cca58c0a3e4a459f9"} Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.455033 5113 scope.go:117] "RemoveContainer" containerID="f957aef456f39457593654e907c136da07d322fd6c11eb3666f083900e4edb1e" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.454559 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rbzxk" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.472319 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hr22j" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.472289 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hr22j" event={"ID":"6b49ee2f-63ad-4580-b29c-8ebcade15f14","Type":"ContainerDied","Data":"37cdf9f19179ad2b1894eb3048fd565c84c337985c1f4a9c460ac96d125c3515"} Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.477044 5113 generic.go:358] "Generic (PLEG): container finished" podID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerID="157db1ad206fcec46c294ad298a9e43c22ead217ae8be09548b063062748f790" exitCode=0 Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.477109 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpzhr" event={"ID":"9f80d9f8-15a6-42da-bfff-15ae5c525dca","Type":"ContainerDied","Data":"157db1ad206fcec46c294ad298a9e43c22ead217ae8be09548b063062748f790"} Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.492915 5113 scope.go:117] "RemoveContainer" containerID="7791ae46c204e764134257a6adf200e96bb188bf0f3e36c075798fd5e83cb36b" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.498635 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rbzxk"] Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.502876 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rbzxk"] Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.516296 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hr22j"] Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.526032 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hr22j"] Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.529727 5113 scope.go:117] "RemoveContainer" containerID="18755a1375e01f5277b223cc953ca9964c813a0ceaa26f01a78f8005ac9b0694" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.546895 5113 scope.go:117] "RemoveContainer" containerID="a89fbaaf3ca10e10c3e1a2ff3fec9623dabcc14884462984559b9881c07500f7" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.563938 5113 scope.go:117] "RemoveContainer" containerID="8cc0b00eab75282eb4c2462f4f8ca60c31a5ee0dce6782039d29c47ae7a9de81" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.580430 5113 scope.go:117] "RemoveContainer" containerID="0e50c4da6ebbebbbc6a616e4a68a340536c2e42212315425689417cd71361071" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.581389 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.674213 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-utilities\") pod \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.674332 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-catalog-content\") pod \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.674431 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5v8w2\" (UniqueName: \"kubernetes.io/projected/9f80d9f8-15a6-42da-bfff-15ae5c525dca-kube-api-access-5v8w2\") pod \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\" (UID: \"9f80d9f8-15a6-42da-bfff-15ae5c525dca\") " Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.675331 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-utilities" (OuterVolumeSpecName: "utilities") pod "9f80d9f8-15a6-42da-bfff-15ae5c525dca" (UID: "9f80d9f8-15a6-42da-bfff-15ae5c525dca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.681728 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f80d9f8-15a6-42da-bfff-15ae5c525dca-kube-api-access-5v8w2" (OuterVolumeSpecName: "kube-api-access-5v8w2") pod "9f80d9f8-15a6-42da-bfff-15ae5c525dca" (UID: "9f80d9f8-15a6-42da-bfff-15ae5c525dca"). InnerVolumeSpecName "kube-api-access-5v8w2". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.690544 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9f80d9f8-15a6-42da-bfff-15ae5c525dca" (UID: "9f80d9f8-15a6-42da-bfff-15ae5c525dca"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.776132 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.776181 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f80d9f8-15a6-42da-bfff-15ae5c525dca-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.776196 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-5v8w2\" (UniqueName: \"kubernetes.io/projected/9f80d9f8-15a6-42da-bfff-15ae5c525dca-kube-api-access-5v8w2\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.781765 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" path="/var/lib/kubelet/pods/6b49ee2f-63ad-4580-b29c-8ebcade15f14/volumes" Jan 30 00:12:46 crc kubenswrapper[5113]: I0130 00:12:46.782465 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" path="/var/lib/kubelet/pods/ed2eb5b7-1d01-4029-86f8-47a057a0352e/volumes" Jan 30 00:12:47 crc kubenswrapper[5113]: I0130 00:12:47.490645 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpzhr" event={"ID":"9f80d9f8-15a6-42da-bfff-15ae5c525dca","Type":"ContainerDied","Data":"ed47f1883fe5c990bc2001395f6d398d1b7fd926eb1cb57a387ba9a9389cc3a7"} Jan 30 00:12:47 crc kubenswrapper[5113]: I0130 00:12:47.491091 5113 scope.go:117] "RemoveContainer" containerID="157db1ad206fcec46c294ad298a9e43c22ead217ae8be09548b063062748f790" Jan 30 00:12:47 crc kubenswrapper[5113]: I0130 00:12:47.490821 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpzhr" Jan 30 00:12:47 crc kubenswrapper[5113]: I0130 00:12:47.521167 5113 scope.go:117] "RemoveContainer" containerID="e65c6e7da0ea05fcea9cacb34320708ce7f9797bb910335627a656791b2ab71a" Jan 30 00:12:47 crc kubenswrapper[5113]: I0130 00:12:47.521318 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpzhr"] Jan 30 00:12:47 crc kubenswrapper[5113]: I0130 00:12:47.522208 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpzhr"] Jan 30 00:12:47 crc kubenswrapper[5113]: I0130 00:12:47.538150 5113 scope.go:117] "RemoveContainer" containerID="cd771ad4b861dd103dc6c7ad20fcd624f9c31b3e84c8549d33b5aa9617ac37f2" Jan 30 00:12:48 crc kubenswrapper[5113]: I0130 00:12:48.572310 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dgdzx"] Jan 30 00:12:48 crc kubenswrapper[5113]: I0130 00:12:48.573091 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dgdzx" podUID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerName="registry-server" containerID="cri-o://7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8" gracePeriod=2 Jan 30 00:12:48 crc kubenswrapper[5113]: I0130 00:12:48.781732 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" path="/var/lib/kubelet/pods/9f80d9f8-15a6-42da-bfff-15ae5c525dca/volumes" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.058265 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.128345 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-utilities\") pod \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.128512 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-catalog-content\") pod \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.128612 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7blrm\" (UniqueName: \"kubernetes.io/projected/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-kube-api-access-7blrm\") pod \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\" (UID: \"b8b3ed2e-5058-4d4f-ba3a-e1a083531407\") " Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.130047 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-utilities" (OuterVolumeSpecName: "utilities") pod "b8b3ed2e-5058-4d4f-ba3a-e1a083531407" (UID: "b8b3ed2e-5058-4d4f-ba3a-e1a083531407"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.144733 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-kube-api-access-7blrm" (OuterVolumeSpecName: "kube-api-access-7blrm") pod "b8b3ed2e-5058-4d4f-ba3a-e1a083531407" (UID: "b8b3ed2e-5058-4d4f-ba3a-e1a083531407"). InnerVolumeSpecName "kube-api-access-7blrm". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.230725 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.231319 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-7blrm\" (UniqueName: \"kubernetes.io/projected/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-kube-api-access-7blrm\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.306197 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b8b3ed2e-5058-4d4f-ba3a-e1a083531407" (UID: "b8b3ed2e-5058-4d4f-ba3a-e1a083531407"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.333603 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8b3ed2e-5058-4d4f-ba3a-e1a083531407-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.508884 5113 generic.go:358] "Generic (PLEG): container finished" podID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerID="7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8" exitCode=0 Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.508987 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgdzx" event={"ID":"b8b3ed2e-5058-4d4f-ba3a-e1a083531407","Type":"ContainerDied","Data":"7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8"} Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.509048 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dgdzx" event={"ID":"b8b3ed2e-5058-4d4f-ba3a-e1a083531407","Type":"ContainerDied","Data":"fa82106cd5f8efa5c8b15274fff3622e11e66133eeca5491c442394e38438d4d"} Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.509085 5113 scope.go:117] "RemoveContainer" containerID="7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.509758 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dgdzx" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.532770 5113 scope.go:117] "RemoveContainer" containerID="e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.555220 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dgdzx"] Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.559957 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dgdzx"] Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.580353 5113 scope.go:117] "RemoveContainer" containerID="61986be761658ca62966f6c06e4e6d9d0a0999ea6c0e87ded2f78581da0f943d" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.597794 5113 scope.go:117] "RemoveContainer" containerID="7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8" Jan 30 00:12:49 crc kubenswrapper[5113]: E0130 00:12:49.598219 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8\": container with ID starting with 7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8 not found: ID does not exist" containerID="7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.598268 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8"} err="failed to get container status \"7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8\": rpc error: code = NotFound desc = could not find container \"7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8\": container with ID starting with 7c98f0602738b08851224b8f08d6883eb8c6cbe5cc1fb85fff0d14a5957d7ba8 not found: ID does not exist" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.598304 5113 scope.go:117] "RemoveContainer" containerID="e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c" Jan 30 00:12:49 crc kubenswrapper[5113]: E0130 00:12:49.598757 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c\": container with ID starting with e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c not found: ID does not exist" containerID="e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.598877 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c"} err="failed to get container status \"e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c\": rpc error: code = NotFound desc = could not find container \"e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c\": container with ID starting with e45284d1171a848f6e0182756bc91c5ed5a38ff079c3d4774b6601665f21871c not found: ID does not exist" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.598949 5113 scope.go:117] "RemoveContainer" containerID="61986be761658ca62966f6c06e4e6d9d0a0999ea6c0e87ded2f78581da0f943d" Jan 30 00:12:49 crc kubenswrapper[5113]: E0130 00:12:49.599502 5113 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"61986be761658ca62966f6c06e4e6d9d0a0999ea6c0e87ded2f78581da0f943d\": container with ID starting with 61986be761658ca62966f6c06e4e6d9d0a0999ea6c0e87ded2f78581da0f943d not found: ID does not exist" containerID="61986be761658ca62966f6c06e4e6d9d0a0999ea6c0e87ded2f78581da0f943d" Jan 30 00:12:49 crc kubenswrapper[5113]: I0130 00:12:49.599551 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61986be761658ca62966f6c06e4e6d9d0a0999ea6c0e87ded2f78581da0f943d"} err="failed to get container status \"61986be761658ca62966f6c06e4e6d9d0a0999ea6c0e87ded2f78581da0f943d\": rpc error: code = NotFound desc = could not find container \"61986be761658ca62966f6c06e4e6d9d0a0999ea6c0e87ded2f78581da0f943d\": container with ID starting with 61986be761658ca62966f6c06e4e6d9d0a0999ea6c0e87ded2f78581da0f943d not found: ID does not exist" Jan 30 00:12:50 crc kubenswrapper[5113]: I0130 00:12:50.785361 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" path="/var/lib/kubelet/pods/b8b3ed2e-5058-4d4f-ba3a-e1a083531407/volumes" Jan 30 00:12:52 crc kubenswrapper[5113]: I0130 00:12:52.508825 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-66458b6674-65wnm"] Jan 30 00:13:06 crc kubenswrapper[5113]: I0130 00:13:06.727417 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"] Jan 30 00:13:06 crc kubenswrapper[5113]: I0130 00:13:06.728231 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt" podUID="d82500ff-9b92-4588-bfec-2df60e572d61" containerName="controller-manager" containerID="cri-o://503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796" gracePeriod=30 Jan 30 00:13:06 crc kubenswrapper[5113]: I0130 00:13:06.736150 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"] Jan 30 00:13:06 crc kubenswrapper[5113]: I0130 00:13:06.736413 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r" podUID="2be7a636-2fc6-4739-a559-a637553541ad" containerName="route-controller-manager" containerID="cri-o://ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034" gracePeriod=30 Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.245813 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.297776 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs"] Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298584 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" containerName="extract-utilities" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298608 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" containerName="extract-utilities" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298619 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" containerName="extract-utilities" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298627 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" containerName="extract-utilities" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298648 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerName="extract-content" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298655 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerName="extract-content" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298675 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" containerName="extract-content" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298683 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" containerName="extract-content" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298697 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="2be7a636-2fc6-4739-a559-a637553541ad" containerName="route-controller-manager" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298704 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="2be7a636-2fc6-4739-a559-a637553541ad" containerName="route-controller-manager" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298722 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerName="extract-content" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298731 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerName="extract-content" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298742 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerName="registry-server" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298750 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerName="registry-server" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298760 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerName="extract-utilities" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298767 5113 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerName="extract-utilities" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298780 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerName="extract-utilities" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298788 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerName="extract-utilities" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298798 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerName="registry-server" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298805 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerName="registry-server" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298815 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="dc8879ad-79d6-4006-8b73-2a881b742008" containerName="pruner" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298822 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc8879ad-79d6-4006-8b73-2a881b742008" containerName="pruner" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298833 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" containerName="registry-server" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298840 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" containerName="registry-server" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298851 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" containerName="registry-server" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298859 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" containerName="registry-server" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298873 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" containerName="extract-content" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.298882 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" containerName="extract-content" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.299020 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="9f80d9f8-15a6-42da-bfff-15ae5c525dca" containerName="registry-server" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.299039 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="b8b3ed2e-5058-4d4f-ba3a-e1a083531407" containerName="registry-server" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.299049 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="6b49ee2f-63ad-4580-b29c-8ebcade15f14" containerName="registry-server" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.299060 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="dc8879ad-79d6-4006-8b73-2a881b742008" containerName="pruner" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.299072 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="ed2eb5b7-1d01-4029-86f8-47a057a0352e" containerName="registry-server" Jan 30 00:13:07 
crc kubenswrapper[5113]: I0130 00:13:07.299082 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="2be7a636-2fc6-4739-a559-a637553541ad" containerName="route-controller-manager" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.307394 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs"] Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.307562 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.326350 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2be7a636-2fc6-4739-a559-a637553541ad-tmp\") pod \"2be7a636-2fc6-4739-a559-a637553541ad\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.326401 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-client-ca\") pod \"2be7a636-2fc6-4739-a559-a637553541ad\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.326572 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-config\") pod \"2be7a636-2fc6-4739-a559-a637553541ad\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.326643 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2be7a636-2fc6-4739-a559-a637553541ad-serving-cert\") pod \"2be7a636-2fc6-4739-a559-a637553541ad\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.326675 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwfc7\" (UniqueName: \"kubernetes.io/projected/2be7a636-2fc6-4739-a559-a637553541ad-kube-api-access-xwfc7\") pod \"2be7a636-2fc6-4739-a559-a637553541ad\" (UID: \"2be7a636-2fc6-4739-a559-a637553541ad\") " Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.327183 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2be7a636-2fc6-4739-a559-a637553541ad-tmp" (OuterVolumeSpecName: "tmp") pod "2be7a636-2fc6-4739-a559-a637553541ad" (UID: "2be7a636-2fc6-4739-a559-a637553541ad"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.327824 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-config" (OuterVolumeSpecName: "config") pod "2be7a636-2fc6-4739-a559-a637553541ad" (UID: "2be7a636-2fc6-4739-a559-a637553541ad"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.328778 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-client-ca" (OuterVolumeSpecName: "client-ca") pod "2be7a636-2fc6-4739-a559-a637553541ad" (UID: "2be7a636-2fc6-4739-a559-a637553541ad"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.340105 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2be7a636-2fc6-4739-a559-a637553541ad-kube-api-access-xwfc7" (OuterVolumeSpecName: "kube-api-access-xwfc7") pod "2be7a636-2fc6-4739-a559-a637553541ad" (UID: "2be7a636-2fc6-4739-a559-a637553541ad"). InnerVolumeSpecName "kube-api-access-xwfc7". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.343720 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2be7a636-2fc6-4739-a559-a637553541ad-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2be7a636-2fc6-4739-a559-a637553541ad" (UID: "2be7a636-2fc6-4739-a559-a637553541ad"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.408007 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.423813 5113 generic.go:358] "Generic (PLEG): container finished" podID="2be7a636-2fc6-4739-a559-a637553541ad" containerID="ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034" exitCode=0 Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.423898 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r" event={"ID":"2be7a636-2fc6-4739-a559-a637553541ad","Type":"ContainerDied","Data":"ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034"} Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.423928 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r" event={"ID":"2be7a636-2fc6-4739-a559-a637553541ad","Type":"ContainerDied","Data":"5e3ebe73514aa5855b6b11c4e3d6ea9c8f31e47cd50b8c7c2264f818ea284162"} Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.423944 5113 scope.go:117] "RemoveContainer" containerID="ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.424057 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.428730 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-client-ca\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.428788 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-tmp\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.428835 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-serving-cert\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.428902 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-config\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.428927 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndmzf\" (UniqueName: \"kubernetes.io/projected/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-kube-api-access-ndmzf\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.428977 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.428990 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2be7a636-2fc6-4739-a559-a637553541ad-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.429001 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-xwfc7\" (UniqueName: \"kubernetes.io/projected/2be7a636-2fc6-4739-a559-a637553541ad-kube-api-access-xwfc7\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.429011 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/2be7a636-2fc6-4739-a559-a637553541ad-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.429019 5113 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/2be7a636-2fc6-4739-a559-a637553541ad-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.431278 5113 generic.go:358] "Generic (PLEG): container finished" podID="d82500ff-9b92-4588-bfec-2df60e572d61" containerID="503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796" exitCode=0 Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.431379 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.431465 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt" event={"ID":"d82500ff-9b92-4588-bfec-2df60e572d61","Type":"ContainerDied","Data":"503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796"} Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.431585 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c6c5d6787-f7lmt" event={"ID":"d82500ff-9b92-4588-bfec-2df60e572d61","Type":"ContainerDied","Data":"fc99f1e3173b957fb47f320669194e250d669d8b28d53791b95388557aab930c"} Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.438308 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq"] Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.439128 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="d82500ff-9b92-4588-bfec-2df60e572d61" containerName="controller-manager" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.439215 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="d82500ff-9b92-4588-bfec-2df60e572d61" containerName="controller-manager" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.439381 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="d82500ff-9b92-4588-bfec-2df60e572d61" containerName="controller-manager" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.450292 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq"] Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.450533 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.451916 5113 scope.go:117] "RemoveContainer" containerID="ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034" Jan 30 00:13:07 crc kubenswrapper[5113]: E0130 00:13:07.452471 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034\": container with ID starting with ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034 not found: ID does not exist" containerID="ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.452501 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034"} err="failed to get container status \"ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034\": rpc error: code = NotFound desc = could not find container \"ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034\": container with ID starting with ec904ccacd4150674fce861523957adbc05b98266b30bd133861ed268e8ef034 not found: ID does not exist" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.452551 5113 scope.go:117] "RemoveContainer" containerID="503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.467140 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"] Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.471053 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-598bb7b487-b6l9r"] Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.480362 5113 scope.go:117] "RemoveContainer" containerID="503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796" Jan 30 00:13:07 crc kubenswrapper[5113]: E0130 00:13:07.481165 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796\": container with ID starting with 503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796 not found: ID does not exist" containerID="503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.481292 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796"} err="failed to get container status \"503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796\": rpc error: code = NotFound desc = could not find container \"503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796\": container with ID starting with 503c2a37a2be3949aacb53edba8597c5d72bff602194a8fe003aaee8ac396796 not found: ID does not exist" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.530398 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d82500ff-9b92-4588-bfec-2df60e572d61-serving-cert\") pod \"d82500ff-9b92-4588-bfec-2df60e572d61\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 
00:13:07.530501 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d82500ff-9b92-4588-bfec-2df60e572d61-tmp\") pod \"d82500ff-9b92-4588-bfec-2df60e572d61\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.530738 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-proxy-ca-bundles\") pod \"d82500ff-9b92-4588-bfec-2df60e572d61\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.531113 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-client-ca\") pod \"d82500ff-9b92-4588-bfec-2df60e572d61\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.531192 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zb9w2\" (UniqueName: \"kubernetes.io/projected/d82500ff-9b92-4588-bfec-2df60e572d61-kube-api-access-zb9w2\") pod \"d82500ff-9b92-4588-bfec-2df60e572d61\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.531275 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-config\") pod \"d82500ff-9b92-4588-bfec-2df60e572d61\" (UID: \"d82500ff-9b92-4588-bfec-2df60e572d61\") " Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.531455 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d82500ff-9b92-4588-bfec-2df60e572d61-tmp" (OuterVolumeSpecName: "tmp") pod "d82500ff-9b92-4588-bfec-2df60e572d61" (UID: "d82500ff-9b92-4588-bfec-2df60e572d61"). InnerVolumeSpecName "tmp". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.531704 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-proxy-ca-bundles\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.531840 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/62ca7dac-0e59-4c7e-9781-2a06507294a0-tmp\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.531995 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62ca7dac-0e59-4c7e-9781-2a06507294a0-serving-cert\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.532142 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-config" (OuterVolumeSpecName: "config") pod "d82500ff-9b92-4588-bfec-2df60e572d61" (UID: "d82500ff-9b92-4588-bfec-2df60e572d61"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.532151 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "d82500ff-9b92-4588-bfec-2df60e572d61" (UID: "d82500ff-9b92-4588-bfec-2df60e572d61"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.532254 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-config\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.532316 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-ndmzf\" (UniqueName: \"kubernetes.io/projected/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-kube-api-access-ndmzf\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.532374 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8bgv\" (UniqueName: \"kubernetes.io/projected/62ca7dac-0e59-4c7e-9781-2a06507294a0-kube-api-access-t8bgv\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.532415 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-client-ca\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.532475 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-client-ca\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.532597 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-client-ca" (OuterVolumeSpecName: "client-ca") pod "d82500ff-9b92-4588-bfec-2df60e572d61" (UID: "d82500ff-9b92-4588-bfec-2df60e572d61"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.532621 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-tmp\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.532918 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-serving-cert\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.533018 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-config\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.533107 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-tmp\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.533497 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-client-ca\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.533945 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-config\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.535101 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d82500ff-9b92-4588-bfec-2df60e572d61-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.535245 5113 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.535427 5113 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.535513 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/d82500ff-9b92-4588-bfec-2df60e572d61-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.535926 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d82500ff-9b92-4588-bfec-2df60e572d61-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d82500ff-9b92-4588-bfec-2df60e572d61" (UID: "d82500ff-9b92-4588-bfec-2df60e572d61"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.536363 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d82500ff-9b92-4588-bfec-2df60e572d61-kube-api-access-zb9w2" (OuterVolumeSpecName: "kube-api-access-zb9w2") pod "d82500ff-9b92-4588-bfec-2df60e572d61" (UID: "d82500ff-9b92-4588-bfec-2df60e572d61"). InnerVolumeSpecName "kube-api-access-zb9w2". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.537164 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-serving-cert\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.549372 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndmzf\" (UniqueName: \"kubernetes.io/projected/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-kube-api-access-ndmzf\") pod \"route-controller-manager-65d7f4859c-5kgzs\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.621582 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.636713 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-t8bgv\" (UniqueName: \"kubernetes.io/projected/62ca7dac-0e59-4c7e-9781-2a06507294a0-kube-api-access-t8bgv\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.636844 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-client-ca\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.636970 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-config\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.637098 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-proxy-ca-bundles\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.637194 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/62ca7dac-0e59-4c7e-9781-2a06507294a0-tmp\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.637271 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62ca7dac-0e59-4c7e-9781-2a06507294a0-serving-cert\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.637381 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d82500ff-9b92-4588-bfec-2df60e572d61-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.637460 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zb9w2\" (UniqueName: \"kubernetes.io/projected/d82500ff-9b92-4588-bfec-2df60e572d61-kube-api-access-zb9w2\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.638661 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/62ca7dac-0e59-4c7e-9781-2a06507294a0-tmp\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc 
kubenswrapper[5113]: I0130 00:13:07.639452 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-client-ca\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.639639 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-config\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.642885 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-proxy-ca-bundles\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.646364 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62ca7dac-0e59-4c7e-9781-2a06507294a0-serving-cert\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.659204 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8bgv\" (UniqueName: \"kubernetes.io/projected/62ca7dac-0e59-4c7e-9781-2a06507294a0-kube-api-access-t8bgv\") pod \"controller-manager-c9d77bb8f-rbfjq\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.722883 5113 ???:1] "http: TLS handshake error from 192.168.126.11:60780: no serving certificate available for the kubelet" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.768367 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"] Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.770683 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.774644 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-c6c5d6787-f7lmt"] Jan 30 00:13:07 crc kubenswrapper[5113]: I0130 00:13:07.891495 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs"] Jan 30 00:13:07 crc kubenswrapper[5113]: W0130 00:13:07.905756 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5f80b8d_d118_4c70_bf23_bd56e888bb3b.slice/crio-ebeba753f12123dfbda50c77451e4c4a68ec86e44d2e6bda1266e7334d6c2107 WatchSource:0}: Error finding container ebeba753f12123dfbda50c77451e4c4a68ec86e44d2e6bda1266e7334d6c2107: Status 404 returned error can't find the container with id ebeba753f12123dfbda50c77451e4c4a68ec86e44d2e6bda1266e7334d6c2107 Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.226849 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq"] Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.441266 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" event={"ID":"f5f80b8d-d118-4c70-bf23-bd56e888bb3b","Type":"ContainerStarted","Data":"4bc3b7d3c498c659e3fd157c3c91255382792c444d97cba2440670332341814f"} Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.441367 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" event={"ID":"f5f80b8d-d118-4c70-bf23-bd56e888bb3b","Type":"ContainerStarted","Data":"ebeba753f12123dfbda50c77451e4c4a68ec86e44d2e6bda1266e7334d6c2107"} Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.441630 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.444459 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" event={"ID":"62ca7dac-0e59-4c7e-9781-2a06507294a0","Type":"ContainerStarted","Data":"1f61523e196bba5340f11e1ab683537f2b5ced12d33d3f08c8b95f9288d3ee8d"} Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.445078 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.445100 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" event={"ID":"62ca7dac-0e59-4c7e-9781-2a06507294a0","Type":"ContainerStarted","Data":"b35dfcb8946f8c7dcb504a3f71b116b48e19f3ae5d0f308fbcef0efec04e7491"} Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.447546 5113 patch_prober.go:28] interesting pod/controller-manager-c9d77bb8f-rbfjq container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 10.217.0.62:8443: connect: connection refused" start-of-body= Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.447598 5113 prober.go:120] "Probe failed" probeType="Readiness" 
pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" podUID="62ca7dac-0e59-4c7e-9781-2a06507294a0" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.62:8443/healthz\": dial tcp 10.217.0.62:8443: connect: connection refused" Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.465022 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" podStartSLOduration=2.464997199 podStartE2EDuration="2.464997199s" podCreationTimestamp="2026-01-30 00:13:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:13:08.464403171 +0000 UTC m=+208.537008568" watchObservedRunningTime="2026-01-30 00:13:08.464997199 +0000 UTC m=+208.537602586" Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.485731 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" podStartSLOduration=2.48569515 podStartE2EDuration="2.48569515s" podCreationTimestamp="2026-01-30 00:13:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:13:08.4824705 +0000 UTC m=+208.555075897" watchObservedRunningTime="2026-01-30 00:13:08.48569515 +0000 UTC m=+208.558300537" Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.781183 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2be7a636-2fc6-4739-a559-a637553541ad" path="/var/lib/kubelet/pods/2be7a636-2fc6-4739-a559-a637553541ad/volumes" Jan 30 00:13:08 crc kubenswrapper[5113]: I0130 00:13:08.782359 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d82500ff-9b92-4588-bfec-2df60e572d61" path="/var/lib/kubelet/pods/d82500ff-9b92-4588-bfec-2df60e572d61/volumes" Jan 30 00:13:09 crc kubenswrapper[5113]: I0130 00:13:09.305954 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:13:09 crc kubenswrapper[5113]: I0130 00:13:09.460426 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.208863 5113 kubelet.go:2547] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.211224 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24" gracePeriod=15 Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.211311 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-syncer" containerID="cri-o://3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16" gracePeriod=15 Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.211243 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" 
containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170" gracePeriod=15 Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.211361 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" containerID="cri-o://a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2" gracePeriod=15 Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.211590 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver" containerID="cri-o://f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580" gracePeriod=15 Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.293142 5113 kubelet.go:2537] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.294552 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-insecure-readyz" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.294698 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-insecure-readyz" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.294814 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="setup" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.294898 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="setup" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.294991 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.295072 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.295150 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.295234 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.295333 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-regeneration-controller" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.295418 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-regeneration-controller" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.295500 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-syncer" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.295610 5113 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-syncer" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.295709 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.295864 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.296000 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.296113 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.296235 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.296346 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.296698 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-regeneration-controller" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.296828 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.296914 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.297124 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-cert-syncer" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.297203 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.297280 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-insecure-readyz" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.297367 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.297659 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.297761 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.297995 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.298091 
5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="3a14caf222afb62aaabdc47808b6f944" containerName="kube-apiserver-check-endpoints" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.302682 5113 kubelet.go:2537] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.315998 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.317283 5113 status_manager.go:895] "Failed to get status for pod" podUID="3a14caf222afb62aaabdc47808b6f944" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.359496 5113 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: E0130 00:13:16.360345 5113 kubelet.go:3342] "Failed creating a mirror pod" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.251:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.391263 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.391367 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.391629 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.391803 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.391931 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-tmp-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.392011 5113 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-ca-bundle-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.392112 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.392156 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.392189 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.392247 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494095 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494617 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-tmp-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494660 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-ca-bundle-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494698 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494726 5113 reconciler_common.go:224] 
"operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494747 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494780 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494817 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494849 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494889 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494989 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.494355 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.495565 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.495586 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: 
\"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.495645 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.495749 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.495755 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/57755cc5f99000cc11e193051474d4e2-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.495783 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.495918 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-tmp-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.495973 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/57755cc5f99000cc11e193051474d4e2-ca-bundle-dir\") pod \"kube-apiserver-crc\" (UID: \"57755cc5f99000cc11e193051474d4e2\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.502776 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-check-endpoints/3.log" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.504465 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-cert-syncer/0.log" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.505907 5113 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2" exitCode=0 Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.505945 5113 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24" exitCode=0 Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.505953 5113 
generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170" exitCode=0 Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.505961 5113 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16" exitCode=2 Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.506221 5113 scope.go:117] "RemoveContainer" containerID="524b98e238697d411aaaba56575ee93f25656aefb79f17572a1504e3f52a32ef" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.510277 5113 generic.go:358] "Generic (PLEG): container finished" podID="6ab098a6-e349-4f74-b76f-e0ff9261030e" containerID="906091be34327e7619ec031d5a6d060ef1fae14147a318294d1611733604af7e" exitCode=0 Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.510390 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-12-crc" event={"ID":"6ab098a6-e349-4f74-b76f-e0ff9261030e","Type":"ContainerDied","Data":"906091be34327e7619ec031d5a6d060ef1fae14147a318294d1611733604af7e"} Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.511407 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.511908 5113 status_manager.go:895] "Failed to get status for pod" podUID="3a14caf222afb62aaabdc47808b6f944" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:16 crc kubenswrapper[5113]: I0130 00:13:16.661508 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:16 crc kubenswrapper[5113]: E0130 00:13:16.691157 5113 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.251:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f59dfe260fc5b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f7dbc7e1ee9c187a863ef9b473fad27b,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:13:16.690480219 +0000 UTC m=+216.763085606,LastTimestamp:2026-01-30 00:13:16.690480219 +0000 UTC m=+216.763085606,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:13:17 crc kubenswrapper[5113]: I0130 00:13:17.536583 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f7dbc7e1ee9c187a863ef9b473fad27b","Type":"ContainerStarted","Data":"c50078fe466ad595a60e90b7e6487c2280d765dfaac1c517b5804bcfe6c080e2"} Jan 30 00:13:17 crc kubenswrapper[5113]: I0130 00:13:17.537077 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f7dbc7e1ee9c187a863ef9b473fad27b","Type":"ContainerStarted","Data":"9cbc12a6623bb71d92ea31eadac86248debbbd1b1cc206efeb08b03a467d43d2"} Jan 30 00:13:17 crc kubenswrapper[5113]: I0130 00:13:17.537799 5113 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:17 crc kubenswrapper[5113]: E0130 00:13:17.538566 5113 kubelet.go:3342] "Failed creating a mirror pod" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.251:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:13:17 crc kubenswrapper[5113]: I0130 00:13:17.538690 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:17 crc kubenswrapper[5113]: I0130 00:13:17.541893 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-cert-syncer/0.log" Jan 30 00:13:17 crc kubenswrapper[5113]: I0130 00:13:17.553247 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" containerName="oauth-openshift" containerID="cri-o://44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7" gracePeriod=15 Jan 30 00:13:17 crc kubenswrapper[5113]: E0130 
00:13:17.772544 5113 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.251:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f59dfe260fc5b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f7dbc7e1ee9c187a863ef9b473fad27b,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:13:16.690480219 +0000 UTC m=+216.763085606,LastTimestamp:2026-01-30 00:13:16.690480219 +0000 UTC m=+216.763085606,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.014890 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.015592 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.019288 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.020198 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.020456 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.121866 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-provider-selection\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.122453 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-service-ca\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.122511 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-policies\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.122599 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-var-lock\") pod \"6ab098a6-e349-4f74-b76f-e0ff9261030e\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.122640 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-idp-0-file-data\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.122729 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-error\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.122772 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-cliconfig\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 
crc kubenswrapper[5113]: I0130 00:13:18.122805 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-var-lock" (OuterVolumeSpecName: "var-lock") pod "6ab098a6-e349-4f74-b76f-e0ff9261030e" (UID: "6ab098a6-e349-4f74-b76f-e0ff9261030e"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.122863 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-serving-cert\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.122913 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-kubelet-dir\") pod \"6ab098a6-e349-4f74-b76f-e0ff9261030e\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.122979 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9phx\" (UniqueName: \"kubernetes.io/projected/62474d91-1e1c-48ee-b28d-bfa517692c72-kube-api-access-c9phx\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.123023 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-ocp-branding-template\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.123094 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-dir\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.123210 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ab098a6-e349-4f74-b76f-e0ff9261030e-kube-api-access\") pod \"6ab098a6-e349-4f74-b76f-e0ff9261030e\" (UID: \"6ab098a6-e349-4f74-b76f-e0ff9261030e\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.123240 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-router-certs\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.123282 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-session\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.123301 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.123341 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-login\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.123397 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-trusted-ca-bundle\") pod \"62474d91-1e1c-48ee-b28d-bfa517692c72\" (UID: \"62474d91-1e1c-48ee-b28d-bfa517692c72\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.123556 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.124215 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "6ab098a6-e349-4f74-b76f-e0ff9261030e" (UID: "6ab098a6-e349-4f74-b76f-e0ff9261030e"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.124283 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.124470 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.124857 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.124898 5113 reconciler_common.go:299] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.124915 5113 reconciler_common.go:299] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-var-lock\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.124926 5113 reconciler_common.go:299] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ab098a6-e349-4f74-b76f-e0ff9261030e-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.124939 5113 reconciler_common.go:299] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62474d91-1e1c-48ee-b28d-bfa517692c72-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.124951 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.125239 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.130515 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.130825 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62474d91-1e1c-48ee-b28d-bfa517692c72-kube-api-access-c9phx" (OuterVolumeSpecName: "kube-api-access-c9phx") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "kube-api-access-c9phx". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.130993 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). 
InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.131210 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ab098a6-e349-4f74-b76f-e0ff9261030e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "6ab098a6-e349-4f74-b76f-e0ff9261030e" (UID: "6ab098a6-e349-4f74-b76f-e0ff9261030e"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.131542 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.131242 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.131639 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.132405 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.132583 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.132937 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "62474d91-1e1c-48ee-b28d-bfa517692c72" (UID: "62474d91-1e1c-48ee-b28d-bfa517692c72"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.226167 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ab098a6-e349-4f74-b76f-e0ff9261030e-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.226233 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.226258 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.226272 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.226291 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.226308 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.226321 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.226334 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.226348 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.226360 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-c9phx\" (UniqueName: \"kubernetes.io/projected/62474d91-1e1c-48ee-b28d-bfa517692c72-kube-api-access-c9phx\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.226374 5113 reconciler_common.go:299] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/62474d91-1e1c-48ee-b28d-bfa517692c72-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.551689 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-12-crc" 
event={"ID":"6ab098a6-e349-4f74-b76f-e0ff9261030e","Type":"ContainerDied","Data":"99d5e969bbbded82782b610f8cc36f4965ea303364f692864ae6655a7e903ad1"} Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.552119 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99d5e969bbbded82782b610f8cc36f4965ea303364f692864ae6655a7e903ad1" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.551950 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-12-crc" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.553258 5113 generic.go:358] "Generic (PLEG): container finished" podID="62474d91-1e1c-48ee-b28d-bfa517692c72" containerID="44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7" exitCode=0 Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.553355 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" event={"ID":"62474d91-1e1c-48ee-b28d-bfa517692c72","Type":"ContainerDied","Data":"44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7"} Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.553371 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" event={"ID":"62474d91-1e1c-48ee-b28d-bfa517692c72","Type":"ContainerDied","Data":"ccfb96aa177309c7b9966ad3df03f30a38ef6cf711e6a696f93d7e1cbaf73b71"} Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.553386 5113 scope.go:117] "RemoveContainer" containerID="44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.553468 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.554895 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.555484 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.672094 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.672499 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.673258 5113 
status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.673670 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.676626 5113 scope.go:117] "RemoveContainer" containerID="44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7" Jan 30 00:13:18 crc kubenswrapper[5113]: E0130 00:13:18.677185 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7\": container with ID starting with 44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7 not found: ID does not exist" containerID="44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.677256 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7"} err="failed to get container status \"44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7\": rpc error: code = NotFound desc = could not find container \"44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7\": container with ID starting with 44303fbf12cf56eb2af44dda163ec8294445ec52bf54b2dd9742433cef3671d7 not found: ID does not exist" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.742494 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-cert-syncer/0.log" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.744208 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.745369 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.746167 5113 status_manager.go:895] "Failed to get status for pod" podUID="3a14caf222afb62aaabdc47808b6f944" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.746845 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.865852 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir\") pod \"3a14caf222afb62aaabdc47808b6f944\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.866020 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "3a14caf222afb62aaabdc47808b6f944" (UID: "3a14caf222afb62aaabdc47808b6f944"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.866899 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir\") pod \"3a14caf222afb62aaabdc47808b6f944\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.867136 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir\") pod \"3a14caf222afb62aaabdc47808b6f944\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.867349 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir\") pod \"3a14caf222afb62aaabdc47808b6f944\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.867252 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "3a14caf222afb62aaabdc47808b6f944" (UID: "3a14caf222afb62aaabdc47808b6f944"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.867507 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "3a14caf222afb62aaabdc47808b6f944" (UID: "3a14caf222afb62aaabdc47808b6f944"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.867876 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir\") pod \"3a14caf222afb62aaabdc47808b6f944\" (UID: \"3a14caf222afb62aaabdc47808b6f944\") " Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.868200 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir" (OuterVolumeSpecName: "ca-bundle-dir") pod "3a14caf222afb62aaabdc47808b6f944" (UID: "3a14caf222afb62aaabdc47808b6f944"). InnerVolumeSpecName "ca-bundle-dir". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.868687 5113 reconciler_common.go:299] "Volume detached for volume \"ca-bundle-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-ca-bundle-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.868835 5113 reconciler_common.go:299] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.868990 5113 reconciler_common.go:299] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.869143 5113 reconciler_common.go:299] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3a14caf222afb62aaabdc47808b6f944-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.871731 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir" (OuterVolumeSpecName: "tmp-dir") pod "3a14caf222afb62aaabdc47808b6f944" (UID: "3a14caf222afb62aaabdc47808b6f944"). InnerVolumeSpecName "tmp-dir". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:13:18 crc kubenswrapper[5113]: I0130 00:13:18.971891 5113 reconciler_common.go:299] "Volume detached for volume \"tmp-dir\" (UniqueName: \"kubernetes.io/empty-dir/3a14caf222afb62aaabdc47808b6f944-tmp-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.565954 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_3a14caf222afb62aaabdc47808b6f944/kube-apiserver-cert-syncer/0.log" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.567447 5113 generic.go:358] "Generic (PLEG): container finished" podID="3a14caf222afb62aaabdc47808b6f944" containerID="f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580" exitCode=0 Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.567623 5113 scope.go:117] "RemoveContainer" containerID="a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.567635 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.568404 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.569254 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.569817 5113 status_manager.go:895] "Failed to get status for pod" podUID="3a14caf222afb62aaabdc47808b6f944" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.590955 5113 scope.go:117] "RemoveContainer" containerID="77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.594152 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.594408 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.594595 5113 status_manager.go:895] "Failed to get status for pod" podUID="3a14caf222afb62aaabdc47808b6f944" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.622290 5113 scope.go:117] "RemoveContainer" containerID="10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.642229 5113 scope.go:117] "RemoveContainer" containerID="3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.669622 5113 scope.go:117] "RemoveContainer" containerID="f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.689203 5113 scope.go:117] "RemoveContainer" containerID="84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.769984 5113 scope.go:117] "RemoveContainer" containerID="a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2" Jan 30 00:13:19 crc kubenswrapper[5113]: E0130 00:13:19.770630 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2\": container with ID starting with a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2 not found: ID does not exist" containerID="a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.770700 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2"} err="failed to get container status \"a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2\": rpc error: code = NotFound desc = could not find container \"a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2\": container with ID starting with a9ddcb5477ffdd58c27676a12d79a7574b5e7d2ad48fd0a4349b255dd4bd3dd2 not found: ID does not exist" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.770738 5113 scope.go:117] "RemoveContainer" containerID="77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24" Jan 30 00:13:19 crc kubenswrapper[5113]: E0130 00:13:19.771226 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24\": container with ID starting with 77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24 not found: ID does not exist" containerID="77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.771269 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24"} err="failed to get container status \"77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24\": rpc error: code = NotFound desc = could not find container \"77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24\": container with ID starting with 77c6a4ce119d456c938cd7b1ab2a1187857c309661ce7a3c4ebafdc977385d24 not found: ID does not exist" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.771296 5113 scope.go:117] "RemoveContainer" 
containerID="10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170" Jan 30 00:13:19 crc kubenswrapper[5113]: E0130 00:13:19.771732 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170\": container with ID starting with 10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170 not found: ID does not exist" containerID="10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.771767 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170"} err="failed to get container status \"10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170\": rpc error: code = NotFound desc = could not find container \"10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170\": container with ID starting with 10b39007b1d5476031fa74088ab1fc27641cd0d8637344b799ccb7bd0d7ed170 not found: ID does not exist" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.771787 5113 scope.go:117] "RemoveContainer" containerID="3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16" Jan 30 00:13:19 crc kubenswrapper[5113]: E0130 00:13:19.772051 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16\": container with ID starting with 3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16 not found: ID does not exist" containerID="3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.772074 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16"} err="failed to get container status \"3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16\": rpc error: code = NotFound desc = could not find container \"3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16\": container with ID starting with 3a34c408f150a15c63352bc45a7746036fdb5242aeda8b3d3f68c530dcacca16 not found: ID does not exist" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.772089 5113 scope.go:117] "RemoveContainer" containerID="f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580" Jan 30 00:13:19 crc kubenswrapper[5113]: E0130 00:13:19.772511 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580\": container with ID starting with f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580 not found: ID does not exist" containerID="f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580" Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.772643 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580"} err="failed to get container status \"f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580\": rpc error: code = NotFound desc = could not find container \"f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580\": container with ID starting with 
Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.772643 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580"} err="failed to get container status \"f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580\": rpc error: code = NotFound desc = could not find container \"f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580\": container with ID starting with f193a62499ba084b7bc2ab4965ca8f7c645e6f17135a246bce0bba25105ae580 not found: ID does not exist"
Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.772682 5113 scope.go:117] "RemoveContainer" containerID="84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988"
Jan 30 00:13:19 crc kubenswrapper[5113]: E0130 00:13:19.773323 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988\": container with ID starting with 84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988 not found: ID does not exist" containerID="84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988"
Jan 30 00:13:19 crc kubenswrapper[5113]: I0130 00:13:19.773356 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988"} err="failed to get container status \"84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988\": rpc error: code = NotFound desc = could not find container \"84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988\": container with ID starting with 84f00261046fc5a9b778b011faaff480069dd0eb02fda16510bbd01e21895988 not found: ID does not exist"
Jan 30 00:13:20 crc kubenswrapper[5113]: I0130 00:13:20.781338 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Jan 30 00:13:20 crc kubenswrapper[5113]: I0130 00:13:20.782823 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused"
Jan 30 00:13:20 crc kubenswrapper[5113]: I0130 00:13:20.783652 5113 status_manager.go:895] "Failed to get status for pod" podUID="3a14caf222afb62aaabdc47808b6f944" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused"
Jan 30 00:13:20 crc kubenswrapper[5113]: I0130 00:13:20.784404 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a14caf222afb62aaabdc47808b6f944" path="/var/lib/kubelet/pods/3a14caf222afb62aaabdc47808b6f944/volumes"
Jan 30 00:13:21 crc kubenswrapper[5113]: I0130 00:13:21.196164 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 00:13:21 crc kubenswrapper[5113]: I0130 00:13:21.196281 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 00:13:25 crc kubenswrapper[5113]: E0130 00:13:25.288341 5113 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused"
Jan 30 00:13:25 crc kubenswrapper[5113]: E0130 00:13:25.289881 5113 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused"
Jan 30 00:13:25 crc kubenswrapper[5113]: E0130 00:13:25.290495 5113 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused"
Jan 30 00:13:25 crc kubenswrapper[5113]: E0130 00:13:25.291156 5113 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused"
Jan 30 00:13:25 crc kubenswrapper[5113]: E0130 00:13:25.291757 5113 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused"
Jan 30 00:13:25 crc kubenswrapper[5113]: I0130 00:13:25.291814 5113 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Jan 30 00:13:25 crc kubenswrapper[5113]: E0130 00:13:25.292322 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="200ms"
Jan 30 00:13:25 crc kubenswrapper[5113]: E0130 00:13:25.493996 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="400ms"
Jan 30 00:13:25 crc kubenswrapper[5113]: E0130 00:13:25.853868 5113 desired_state_of_world_populator.go:305] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.251:6443: connect: connection refused" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" volumeName="registry-storage"
Jan 30 00:13:25 crc kubenswrapper[5113]: E0130 00:13:25.895271 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="800ms"
Jan 30 00:13:26 crc kubenswrapper[5113]: E0130 00:13:26.696544 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="1.6s"
5113 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.251:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f59dfe260fc5b openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f7dbc7e1ee9c187a863ef9b473fad27b,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:68c07ee2fb6450c7b3b35bfdfc158dc475aaa0bcf9fba28b5e310d7e03355c04\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 00:13:16.690480219 +0000 UTC m=+216.763085606,LastTimestamp:2026-01-30 00:13:16.690480219 +0000 UTC m=+216.763085606,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 00:13:28 crc kubenswrapper[5113]: E0130 00:13:28.297555 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="3.2s" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.652665 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.652737 5113 generic.go:358] "Generic (PLEG): container finished" podID="9f0bc7fcb0822a2c13eb2d22cd8c0641" containerID="6e418d2037fa46413cbb5c58dc73ecf2ecc6f110ebd3bfec9715e53ec0b6c855" exitCode=1 Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.652811 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerDied","Data":"6e418d2037fa46413cbb5c58dc73ecf2ecc6f110ebd3bfec9715e53ec0b6c855"} Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.653862 5113 scope.go:117] "RemoveContainer" containerID="6e418d2037fa46413cbb5c58dc73ecf2ecc6f110ebd3bfec9715e53ec0b6c855" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.654394 5113 status_manager.go:895] "Failed to get status for pod" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.655224 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.655694 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.779907 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.780909 5113 status_manager.go:895] "Failed to get status for pod" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.781742 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.782375 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.782943 5113 status_manager.go:895] "Failed to get status for pod" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.783327 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.783786 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.806367 5113 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.806419 5113 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:13:30 crc kubenswrapper[5113]: E0130 00:13:30.807110 5113 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.807693 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:30 crc kubenswrapper[5113]: W0130 00:13:30.831606 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57755cc5f99000cc11e193051474d4e2.slice/crio-1b3ac1643dace703d449fcdd4d67a44559fd12a1cf5d95c7cadd92f8a931aeb3 WatchSource:0}: Error finding container 1b3ac1643dace703d449fcdd4d67a44559fd12a1cf5d95c7cadd92f8a931aeb3: Status 404 returned error can't find the container with id 1b3ac1643dace703d449fcdd4d67a44559fd12a1cf5d95c7cadd92f8a931aeb3 Jan 30 00:13:30 crc kubenswrapper[5113]: I0130 00:13:30.883602 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:13:31 crc kubenswrapper[5113]: E0130 00:13:31.498487 5113 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.251:6443: connect: connection refused" interval="6.4s" Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.662312 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log" Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.662461 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"9f0bc7fcb0822a2c13eb2d22cd8c0641","Type":"ContainerStarted","Data":"be7ed6702aa0f1cd7ecf956fe806b9bf79890b6cc8217c82e22ce5ce579cef78"} Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.663346 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.663925 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.664183 5113 generic.go:358] "Generic (PLEG): container finished" podID="57755cc5f99000cc11e193051474d4e2" containerID="09a4ce7b4c89dc8e005a85bc38dc080741c1bfd4a30bf7f4228f673b1c70d94d" exitCode=0 Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.664193 5113 status_manager.go:895] "Failed to get status for pod" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.664293 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerDied","Data":"09a4ce7b4c89dc8e005a85bc38dc080741c1bfd4a30bf7f4228f673b1c70d94d"} Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.664361 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"1b3ac1643dace703d449fcdd4d67a44559fd12a1cf5d95c7cadd92f8a931aeb3"} Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.664853 5113 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.664885 5113 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.665394 5113 status_manager.go:895] "Failed to get status for pod" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" pod="openshift-authentication/oauth-openshift-66458b6674-65wnm" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-66458b6674-65wnm\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:31 crc kubenswrapper[5113]: E0130 00:13:31.665405 5113 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.665727 5113 status_manager.go:895] "Failed to get status for pod" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:31 crc kubenswrapper[5113]: I0130 00:13:31.666172 5113 status_manager.go:895] "Failed to get status for pod" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" pod="openshift-kube-apiserver/installer-12-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-12-crc\": dial tcp 38.102.83.251:6443: connect: connection refused" Jan 30 00:13:32 crc kubenswrapper[5113]: I0130 00:13:32.677337 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"2bd9756481bcb7b1a9cbbfaf889fdc59c70f57fc8b63f817089adc729e65759a"} Jan 30 00:13:32 crc kubenswrapper[5113]: I0130 00:13:32.677963 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"82e097d651fe3429812893f8bf80f2cbca33c437e4070a275b7427b1699fe4f2"} Jan 30 00:13:32 crc kubenswrapper[5113]: I0130 00:13:32.677992 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"6fbae9bcd3a6d34f2eb30793d988b25cc358f193ddfffefc442d8f461a938abf"} Jan 30 00:13:33 crc kubenswrapper[5113]: I0130 00:13:33.687908 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"c6d7bc01c056b2d3ae2abe520718d7f4776857a52dd707bc35415c231ebd9b79"} Jan 30 00:13:33 crc kubenswrapper[5113]: I0130 00:13:33.688249 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"57755cc5f99000cc11e193051474d4e2","Type":"ContainerStarted","Data":"0d0340c0151bff41250b41472d82601b3b190d81549637dd331bc3f34b86736a"} Jan 30 00:13:33 crc kubenswrapper[5113]: I0130 00:13:33.688275 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:33 crc kubenswrapper[5113]: I0130 00:13:33.688331 5113 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:13:33 crc kubenswrapper[5113]: I0130 00:13:33.688373 5113 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:13:35 crc kubenswrapper[5113]: I0130 00:13:35.808003 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:35 crc kubenswrapper[5113]: I0130 00:13:35.808090 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:35 crc kubenswrapper[5113]: I0130 00:13:35.816960 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:38 crc kubenswrapper[5113]: I0130 00:13:38.766136 5113 kubelet.go:3329] "Deleted mirror pod as it didn't match the static Pod" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:38 crc kubenswrapper[5113]: I0130 00:13:38.766887 5113 kubelet.go:3340] "Creating a mirror pod for static pod" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:39 crc kubenswrapper[5113]: I0130 00:13:39.737253 5113 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:13:39 crc kubenswrapper[5113]: I0130 00:13:39.737299 5113 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:13:39 crc kubenswrapper[5113]: I0130 00:13:39.743136 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:13:40 crc kubenswrapper[5113]: I0130 00:13:40.566640 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:13:40 crc kubenswrapper[5113]: I0130 00:13:40.567064 5113 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 30 00:13:40 crc kubenswrapper[5113]: I0130 00:13:40.568069 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="9f0bc7fcb0822a2c13eb2d22cd8c0641" containerName="kube-controller-manager" probeResult="failure" output="Get 
\"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 30 00:13:40 crc kubenswrapper[5113]: I0130 00:13:40.747389 5113 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:13:40 crc kubenswrapper[5113]: I0130 00:13:40.747445 5113 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:13:40 crc kubenswrapper[5113]: I0130 00:13:40.815581 5113 status_manager.go:905] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="57755cc5f99000cc11e193051474d4e2" podUID="7c64d6d7-b1b4-4fbc-b0a9-712fc354cd84" Jan 30 00:13:40 crc kubenswrapper[5113]: I0130 00:13:40.883630 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:13:48 crc kubenswrapper[5113]: I0130 00:13:48.913331 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns-operator\"/\"dns-operator-dockercfg-wbbsn\"" Jan 30 00:13:49 crc kubenswrapper[5113]: I0130 00:13:49.244190 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"openshift-service-ca.crt\"" Jan 30 00:13:49 crc kubenswrapper[5113]: I0130 00:13:49.957829 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-controller-dockercfg-xnj77\"" Jan 30 00:13:49 crc kubenswrapper[5113]: I0130 00:13:49.960214 5113 reflector.go:430] "Caches populated" type="*v1.RuntimeClass" reflector="k8s.io/client-go/informers/factory.go:160" Jan 30 00:13:50 crc kubenswrapper[5113]: I0130 00:13:50.150547 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-dockercfg-6n5ln\"" Jan 30 00:13:50 crc kubenswrapper[5113]: I0130 00:13:50.168355 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"trusted-ca-bundle\"" Jan 30 00:13:50 crc kubenswrapper[5113]: I0130 00:13:50.377391 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"node-bootstrapper-token\"" Jan 30 00:13:50 crc kubenswrapper[5113]: I0130 00:13:50.458881 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"marketplace-operator-metrics\"" Jan 30 00:13:50 crc kubenswrapper[5113]: I0130 00:13:50.572967 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:13:50 crc kubenswrapper[5113]: I0130 00:13:50.580735 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 00:13:50 crc kubenswrapper[5113]: I0130 00:13:50.907593 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca-operator\"/\"serving-cert\"" Jan 30 00:13:50 crc kubenswrapper[5113]: I0130 00:13:50.968100 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-config-operator\"/\"kube-root-ca.crt\"" Jan 30 00:13:51 crc kubenswrapper[5113]: I0130 00:13:51.195952 5113 patch_prober.go:28] interesting 
Jan 30 00:13:51 crc kubenswrapper[5113]: I0130 00:13:51.195952 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 00:13:51 crc kubenswrapper[5113]: I0130 00:13:51.196101 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 00:13:51 crc kubenswrapper[5113]: I0130 00:13:51.280907 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:13:51 crc kubenswrapper[5113]: I0130 00:13:51.332993 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator\"/\"kube-root-ca.crt\""
Jan 30 00:13:51 crc kubenswrapper[5113]: I0130 00:13:51.565367 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"kube-root-ca.crt\""
Jan 30 00:13:51 crc kubenswrapper[5113]: I0130 00:13:51.593220 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:13:51 crc kubenswrapper[5113]: I0130 00:13:51.622457 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"node-resolver-dockercfg-tk7bt\""
Jan 30 00:13:51 crc kubenswrapper[5113]: I0130 00:13:51.631760 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"audit-1\""
Jan 30 00:13:51 crc kubenswrapper[5113]: I0130 00:13:51.712770 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-route-controller-manager\"/\"serving-cert\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.017634 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-serving-cert\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.119881 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"control-plane-machine-set-operator-tls\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.144385 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-root-ca.crt\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.339935 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"kube-root-ca.crt\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.351211 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-samples-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.385674 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"kube-root-ca.crt\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.431372 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-marketplace-dockercfg-gg4w7\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.436839 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"openshift-service-ca.crt\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.442016 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"kube-root-ca.crt\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.569257 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-config-operator\"/\"config-operator-serving-cert\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.675470 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"hostpath-provisioner\"/\"openshift-service-ca.crt\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.749172 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication-operator\"/\"serving-cert\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.754395 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"trusted-ca-bundle\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.762772 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler-operator\"/\"kube-root-ca.crt\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.781560 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"encryption-config-1\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.824472 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"pprof-cert\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.836943 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager\"/\"openshift-controller-manager-sa-dockercfg-djmfg\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.909869 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-dockercfg-6c46w\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.913245 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"openshift-service-ca.crt\""
Jan 30 00:13:52 crc kubenswrapper[5113]: I0130 00:13:52.981049 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-node-dockercfg-l2v2m\""
Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.047222 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-route-controller-manager\"/\"route-controller-manager-sa-dockercfg-mmcpt\""
Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.083841 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-version\"/\"default-dockercfg-hqpm5\""
Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.109890 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"console-operator-config\""
Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.114584 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"kube-root-ca.crt\""
reflector="object-\"openshift-machine-config-operator\"/\"machine-config-server-tls\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.169208 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"dns-default\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.178057 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"kube-root-ca.crt\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.283982 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"image-registry-tls\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.322321 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-diagnostics\"/\"openshift-service-ca.crt\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.409147 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-metrics-certs-default\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.412774 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-canary\"/\"openshift-service-ca.crt\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.470016 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-serving-cert\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.517134 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-dockercfg-8dkm8\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.586971 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"openshift-service-ca.crt\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.589163 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-oauth-config\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.628727 5113 reflector.go:430] "Caches populated" type="*v1.Service" reflector="k8s.io/client-go/informers/factory.go:160" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.641150 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-config-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.643768 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"console-serving-cert\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.644447 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator\"/\"kube-storage-version-migrator-sa-dockercfg-kknhg\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.690123 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"config\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.741092 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-canary\"/\"canary-serving-cert\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.807072 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"kube-rbac-proxy\"" Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 
Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.868227 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"pruner-dockercfg-rs58m\""
Jan 30 00:13:53 crc kubenswrapper[5113]: I0130 00:13:53.936437 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-images\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.003329 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"service-ca-operator-config\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.030561 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.032848 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"openshift-apiserver-sa-dockercfg-4zqgh\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.149466 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"etcd-serving-ca\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.375828 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"kube-root-ca.crt\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.385348 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"whereabouts-flatfile-config\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.443506 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca-operator\"/\"service-ca-operator-dockercfg-bjqfd\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.520546 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-operator-dockercfg-sw6nc\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.596854 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"dns-default-metrics-tls\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.597314 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"kube-root-ca.crt\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.725811 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-api\"/\"kube-root-ca.crt\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.772217 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"kube-root-ca.crt\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.775006 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"openshift-service-ca.crt\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.845513 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"audit-1\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.850378 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"kube-root-ca.crt\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.939762 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager\"/\"serving-cert\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.942494 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"etcd-client\""
Jan 30 00:13:54 crc kubenswrapper[5113]: I0130 00:13:54.971395 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"cni-copy-resources\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.075603 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"package-server-manager-serving-cert\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.110896 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"installation-pull-secrets\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.251860 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"kube-root-ca.crt\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.343791 5113 reflector.go:430] "Caches populated" type="*v1.Node" reflector="k8s.io/client-go/informers/factory.go:160"
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.352414 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"default-dockercfg-g6kgg\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.375286 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-dockercfg-tnfx9\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.379366 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-canary\"/\"kube-root-ca.crt\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.393404 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"community-operators-dockercfg-vrd5f\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.424203 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-dockercfg-bf7fj\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.528848 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"kube-root-ca.crt\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.530769 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"client-ca\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.553939 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"kube-storage-version-migrator-operator-dockercfg-2h6bs\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.571662 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-samples-operator\"/\"samples-operator-tls\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.619258 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns-operator\"/\"metrics-tls\""
Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.648727 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"registry-dockercfg-6w67b\""
type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"kube-root-ca.crt\"" Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.715778 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"ovnkube-script-lib\"" Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.734020 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-service-ca-bundle\"" Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.768174 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"authentication-operator-config\"" Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.784721 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-daemon-dockercfg-w9nzh\"" Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.811765 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"trusted-ca-bundle\"" Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.847270 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-serving-cert\"" Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.935495 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"service-ca\"" Jan 30 00:13:55 crc kubenswrapper[5113]: I0130 00:13:55.993976 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"kube-root-ca.crt\"" Jan 30 00:13:56 crc kubenswrapper[5113]: I0130 00:13:56.085852 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-node-metrics-cert\"" Jan 30 00:13:56 crc kubenswrapper[5113]: I0130 00:13:56.205952 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"service-ca-bundle\"" Jan 30 00:13:56 crc kubenswrapper[5113]: I0130 00:13:56.302013 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"olm-operator-serving-cert\"" Jan 30 00:13:56 crc kubenswrapper[5113]: I0130 00:13:56.439724 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"kube-root-ca.crt\"" Jan 30 00:13:56 crc kubenswrapper[5113]: I0130 00:13:56.462087 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"trusted-ca-bundle\"" Jan 30 00:13:56 crc kubenswrapper[5113]: I0130 00:13:56.718916 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"iptables-alerter-script\"" Jan 30 00:13:56 crc kubenswrapper[5113]: I0130 00:13:56.804562 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"kube-rbac-proxy\"" Jan 30 00:13:56 crc kubenswrapper[5113]: I0130 00:13:56.814952 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-config-operator\"/\"openshift-config-operator-dockercfg-sjn6s\"" Jan 30 00:13:56 crc kubenswrapper[5113]: I0130 00:13:56.863686 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" 
reflector="object-\"openshift-controller-manager\"/\"config\"" Jan 30 00:13:56 crc kubenswrapper[5113]: I0130 00:13:56.956404 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"etcd-client\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.007306 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"kube-rbac-proxy\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.045313 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-config\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.068146 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-ac-dockercfg-gj7jx\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.069227 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"kube-root-ca.crt\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.115366 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"oauth-serving-cert\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.136086 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.156556 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-version\"/\"kube-root-ca.crt\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.159673 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication-operator\"/\"authentication-operator-dockercfg-6tbpn\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.175305 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"hostpath-provisioner\"/\"kube-root-ca.crt\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.239835 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-version\"/\"openshift-service-ca.crt\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.282223 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-dockercfg-2wbn2\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.381177 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"marketplace-trusted-ca\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.389715 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"kube-root-ca.crt\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.428333 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-kubernetes-control-plane-dockercfg-nl8tp\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.435111 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"config\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.459606 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-stats-default\"" Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 
Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.493089 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-dockercfg-kw8fx\""
Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.641342 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"ovnkube-config\""
Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.652854 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-operator\"/\"ingress-operator-dockercfg-74nwh\""
Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.693964 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-config\""
Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.786271 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"multus-daemon-config\""
Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.805880 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-ca-bundle\""
Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.899123 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"env-overrides\""
Jan 30 00:13:57 crc kubenswrapper[5113]: I0130 00:13:57.944250 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-console\"/\"networking-console-plugin\""
Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.033782 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-scheduler-operator\"/\"kube-scheduler-operator-serving-cert\""
Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.087871 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"serving-cert\""
Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.121043 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console-operator\"/\"trusted-ca\""
Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.225061 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console-operator\"/\"console-operator-dockercfg-kl6m8\""
Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.227080 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"openshift-service-ca.crt\""
Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.235043 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"openshift-service-ca.crt\""
Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.240033 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-serving-cert\""
Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.262576 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-cluster-samples-operator\"/\"kube-root-ca.crt\""
Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.297143 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"ovnkube-identity-cm\""
reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"kube-root-ca.crt\"" Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.396057 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-dockercfg-jcmfj\"" Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.414320 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ovn-kubernetes\"/\"openshift-service-ca.crt\"" Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.440620 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"mco-proxy-tls\"" Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.447807 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.462893 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"image-registry-certificates\"" Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.463301 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress\"/\"router-certs-default\"" Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.512572 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"openshift-service-ca.crt\"" Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.549467 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"metrics-daemon-sa-dockercfg-t8n29\"" Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.673563 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"machine-api-operator-tls\"" Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.691755 5113 reflector.go:430] "Caches populated" type="*v1.CSIDriver" reflector="k8s.io/client-go/informers/factory.go:160" Jan 30 00:13:58 crc kubenswrapper[5113]: I0130 00:13:58.792163 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-scheduler-operator\"/\"openshift-kube-scheduler-operator-config\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.005637 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"image-registry-operator-tls\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.058003 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-operator\"/\"metrics-tls\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.104177 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-config\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.106964 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"signing-cabundle\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.124631 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"client-ca\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.146567 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"openshift-service-ca.crt\"" Jan 30 00:13:59 
crc kubenswrapper[5113]: I0130 00:13:59.218488 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"cluster-image-registry-operator-dockercfg-ntnd7\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.332753 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-node-identity\"/\"network-node-identity-cert\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.427572 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"kube-root-ca.crt\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.453057 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"openshift-service-ca.crt\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.470505 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console-operator\"/\"serving-cert\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.488247 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"proxy-tls\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.540303 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"openshift-service-ca.crt\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.827190 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-operator-images\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.839938 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns-operator\"/\"kube-root-ca.crt\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.849662 5113 reflector.go:430] "Caches populated" logger="kubernetes.io/kubelet-serving" type="*v1.CertificateSigningRequest" reflector="k8s.io/client-go/tools/watch/informerwatcher.go:162" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.875153 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"kube-root-ca.crt\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.902770 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-root-ca.crt\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.907185 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-apiserver-operator\"/\"kube-apiserver-operator-config\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.941905 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-operators-dockercfg-9gxlh\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.958017 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ingress-canary\"/\"default-dockercfg-9pgs7\"" Jan 30 00:13:59 crc kubenswrapper[5113]: I0130 00:13:59.972217 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress\"/\"service-ca-bundle\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.040484 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-machine-config-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:14:00 crc kubenswrapper[5113]: 
I0130 00:14:00.138277 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-operator\"/\"metrics-tls\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.144942 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-network-console\"/\"networking-console-plugin-cert\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.161341 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-controller-manager-operator\"/\"kube-controller-manager-operator-config\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.199671 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca\"/\"service-ca-dockercfg-bgxvm\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.237049 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"trusted-ca\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.239806 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"openshift-service-ca.crt\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.321677 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-service-ca\"/\"signing-key\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.354586 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-api\"/\"control-plane-machine-set-operator-dockercfg-gnx66\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.397491 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-ancillary-tools-dockercfg-nwglk\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.484059 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"etcd-serving-ca\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.546114 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-serving-cert\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.598266 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.607895 5113 reflector.go:430] "Caches populated" type="*v1.Pod" reflector="pkg/kubelet/config/apiserver.go:66" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.615999 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-66458b6674-65wnm"] Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.616181 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-576bd486d8-68jgg","openshift-kube-apiserver/kube-apiserver-crc"] Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.616774 5113 kubelet.go:3323] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.616816 5113 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="df6f4867-b098-485b-81b7-844ef832d471" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.617225 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" 
podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" containerName="installer" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.617262 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" containerName="installer" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.617292 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" containerName="oauth-openshift" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.617302 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" containerName="oauth-openshift" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.617466 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" containerName="oauth-openshift" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.617483 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="6ab098a6-e349-4f74-b76f-e0ff9261030e" containerName="installer" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.618307 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"catalog-operator-serving-cert\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.632166 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.632246 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.638056 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-error\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.638490 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-serving-cert\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.638888 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-service-ca\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.641753 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"kube-root-ca.crt\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.642742 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-operator-dockercfg-4vdnc\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.643057 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"audit\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.643140 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-router-certs\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.643161 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-provider-selection\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.643260 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-session\"" Jan 30 00:14:00 crc kubenswrapper[5113]: 
I0130 00:14:00.643299 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-template-login\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.643353 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-cliconfig\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.643609 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-user-idp-0-file-data\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.643680 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"openshift-service-ca.crt\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.643882 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"oauth-openshift-dockercfg-d2bf2\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.656398 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-trusted-ca-bundle\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.662026 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-authentication\"/\"v4-0-config-system-ocp-branding-template\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.665093 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=22.66470444 podStartE2EDuration="22.66470444s" podCreationTimestamp="2026-01-30 00:13:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:14:00.660344715 +0000 UTC m=+260.732950112" watchObservedRunningTime="2026-01-30 00:14:00.66470444 +0000 UTC m=+260.737309827" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.712139 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-serving-cert\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.712490 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.712627 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-cliconfig\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.712768 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-audit-policies\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.712886 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3bde0ed2-71a3-4281-b21d-af61bdb778ef-audit-dir\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.713008 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-service-ca\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.713133 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-template-error\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.713240 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fg879\" (UniqueName: \"kubernetes.io/projected/3bde0ed2-71a3-4281-b21d-af61bdb778ef-kube-api-access-fg879\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.713343 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-router-certs\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.713447 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.713596 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-session\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.713715 5113 reconciler_common.go:251] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.713821 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-template-login\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.713919 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.720205 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"mcc-proxy-tls\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.784952 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"trusted-ca\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.796597 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62474d91-1e1c-48ee-b28d-bfa517692c72" path="/var/lib/kubelet/pods/62474d91-1e1c-48ee-b28d-bfa517692c72/volumes" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.815104 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3bde0ed2-71a3-4281-b21d-af61bdb778ef-audit-dir\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.815210 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3bde0ed2-71a3-4281-b21d-af61bdb778ef-audit-dir\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.815218 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-service-ca\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.815314 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-template-error\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: 
\"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.815359 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-fg879\" (UniqueName: \"kubernetes.io/projected/3bde0ed2-71a3-4281-b21d-af61bdb778ef-kube-api-access-fg879\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.815408 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-router-certs\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.815484 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.816679 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-session\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.816791 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.816861 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-template-login\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.816909 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.816973 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-serving-cert\") pod 
\"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.817055 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.817140 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-cliconfig\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.817597 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-service-ca\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.817889 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-audit-policies\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.818158 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-cliconfig\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.818661 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.819355 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3bde0ed2-71a3-4281-b21d-af61bdb778ef-audit-policies\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.822216 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-template-error\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " 
pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.822273 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.822371 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.822605 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-serving-cert\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.822765 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-session\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.824979 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-router-certs\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.826379 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-user-template-login\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.828716 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3bde0ed2-71a3-4281-b21d-af61bdb778ef-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.834331 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-fg879\" (UniqueName: \"kubernetes.io/projected/3bde0ed2-71a3-4281-b21d-af61bdb778ef-kube-api-access-fg879\") pod \"oauth-openshift-576bd486d8-68jgg\" (UID: \"3bde0ed2-71a3-4281-b21d-af61bdb778ef\") " 
pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.848922 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"openshift-service-ca.crt\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.868760 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-route-controller-manager\"/\"config\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.917621 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-multus\"/\"default-cni-sysctl-allowlist\"" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.960885 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:00 crc kubenswrapper[5113]: I0130 00:14:00.975855 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-console\"/\"console-config\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.059998 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-576bd486d8-68jgg"] Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.092069 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-image-registry\"/\"serviceca\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.198281 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-authentication-operator\"/\"kube-root-ca.crt\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.198695 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-etcd-operator\"/\"etcd-client\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.395568 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"kube-root-ca.crt\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.429060 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-576bd486d8-68jgg"] Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.492126 5113 kubelet.go:2547] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.492569 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" containerName="startup-monitor" containerID="cri-o://c50078fe466ad595a60e90b7e6487c2280d765dfaac1c517b5804bcfe6c080e2" gracePeriod=5 Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.554770 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"encryption-config-1\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.610840 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"hostpath-provisioner\"/\"csi-hostpath-provisioner-sa-dockercfg-7dcws\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.633571 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-diagnostics\"/\"kube-root-ca.crt\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.653447 5113 reflector.go:430] "Caches populated" type="*v1.Secret" 
reflector="object-\"openshift-marketplace\"/\"certified-operators-dockercfg-7cl8d\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.706744 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-dns\"/\"dns-dockercfg-kpvmz\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.773882 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"marketplace-operator-dockercfg-2cfkp\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.780184 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-image-registry\"/\"node-ca-dockercfg-tjs74\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.787589 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-tls\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.837286 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-ovn-kubernetes\"/\"ovn-control-plane-metrics-cert\"" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.890567 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" event={"ID":"3bde0ed2-71a3-4281-b21d-af61bdb778ef","Type":"ContainerStarted","Data":"e368bae6faa0343df9a6d1d00377118fafd600009df360e638234960d1838d8a"} Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.890643 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" event={"ID":"3bde0ed2-71a3-4281-b21d-af61bdb778ef","Type":"ContainerStarted","Data":"a147505a3ab4efc6891bbb50aa612226908a72496a00d04627f639a609429b61"} Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.891351 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.892739 5113 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="pods \"kube-apiserver-startup-monitor-crc\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-kube-apiserver\": no relationship found between node 'crc' and this object" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.919030 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" podStartSLOduration=69.919005351 podStartE2EDuration="1m9.919005351s" podCreationTimestamp="2026-01-30 00:12:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:14:01.914484 +0000 UTC m=+261.987089397" watchObservedRunningTime="2026-01-30 00:14:01.919005351 +0000 UTC m=+261.991610728" Jan 30 00:14:01 crc kubenswrapper[5113]: I0130 00:14:01.935480 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"openshift-global-ca\"" Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.025678 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-console\"/\"default-dockercfg-mdwwj\"" Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.091174 5113 reflector.go:430] "Caches populated" type="*v1.Secret" 
reflector="object-\"openshift-cluster-samples-operator\"/\"cluster-samples-operator-dockercfg-jmhxf\"" Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.094510 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.313640 5113 patch_prober.go:28] interesting pod/oauth-openshift-576bd486d8-68jgg container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.63:6443/healthz\": read tcp 10.217.0.2:37368->10.217.0.63:6443: read: connection reset by peer" start-of-body= Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.313809 5113 prober.go:120] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" podUID="3bde0ed2-71a3-4281-b21d-af61bdb778ef" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.63:6443/healthz\": read tcp 10.217.0.2:37368->10.217.0.63:6443: read: connection reset by peer" Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.356678 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-node-identity\"/\"env-overrides\"" Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.529754 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-machine-config-operator\"/\"machine-config-server-dockercfg-dzw6b\"" Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.679571 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-controller-manager\"/\"kube-root-ca.crt\"" Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.898725 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/0.log" Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.898796 5113 generic.go:358] "Generic (PLEG): container finished" podID="3bde0ed2-71a3-4281-b21d-af61bdb778ef" containerID="e368bae6faa0343df9a6d1d00377118fafd600009df360e638234960d1838d8a" exitCode=255 Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.898967 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" event={"ID":"3bde0ed2-71a3-4281-b21d-af61bdb778ef","Type":"ContainerDied","Data":"e368bae6faa0343df9a6d1d00377118fafd600009df360e638234960d1838d8a"} Jan 30 00:14:02 crc kubenswrapper[5113]: I0130 00:14:02.899894 5113 scope.go:117] "RemoveContainer" containerID="e368bae6faa0343df9a6d1d00377118fafd600009df360e638234960d1838d8a" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.025060 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-apiserver\"/\"serving-cert\"" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.061044 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.072196 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"metrics-daemon-secret\"" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.136333 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-machine-approver\"/\"machine-approver-sa-dockercfg-wzhvk\"" Jan 30 
00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.317126 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-service-ca\"/\"openshift-service-ca.crt\"" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.625596 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-oauth-apiserver\"/\"openshift-service-ca.crt\"" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.674716 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-cluster-version\"/\"cluster-version-operator-serving-cert\"" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.704213 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-ingress-operator\"/\"kube-root-ca.crt\"" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.726113 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver\"/\"image-import-ca\"" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.907293 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.907745 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/0.log" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.907792 5113 generic.go:358] "Generic (PLEG): container finished" podID="3bde0ed2-71a3-4281-b21d-af61bdb778ef" containerID="2a77bc73ca6aa15ba01f8eed9047540dce82159b1a12908d2ae12b4a4fb034c4" exitCode=255 Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.907843 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" event={"ID":"3bde0ed2-71a3-4281-b21d-af61bdb778ef","Type":"ContainerDied","Data":"2a77bc73ca6aa15ba01f8eed9047540dce82159b1a12908d2ae12b4a4fb034c4"} Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.907928 5113 scope.go:117] "RemoveContainer" containerID="e368bae6faa0343df9a6d1d00377118fafd600009df360e638234960d1838d8a" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.908904 5113 scope.go:117] "RemoveContainer" containerID="2a77bc73ca6aa15ba01f8eed9047540dce82159b1a12908d2ae12b4a4fb034c4" Jan 30 00:14:03 crc kubenswrapper[5113]: E0130 00:14:03.909248 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oauth-openshift\" with CrashLoopBackOff: \"back-off 10s restarting failed container=oauth-openshift pod=oauth-openshift-576bd486d8-68jgg_openshift-authentication(3bde0ed2-71a3-4281-b21d-af61bdb778ef)\"" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" podUID="3bde0ed2-71a3-4281-b21d-af61bdb778ef" Jan 30 00:14:03 crc kubenswrapper[5113]: I0130 00:14:03.963065 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-config\"" Jan 30 00:14:04 crc kubenswrapper[5113]: I0130 00:14:04.020192 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"olm-operator-serviceaccount-dockercfg-4gqzj\"" Jan 30 00:14:04 crc kubenswrapper[5113]: I0130 00:14:04.149172 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-dns\"/\"kube-root-ca.crt\"" Jan 30 00:14:04 
crc kubenswrapper[5113]: I0130 00:14:04.931945 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log" Jan 30 00:14:04 crc kubenswrapper[5113]: I0130 00:14:04.933082 5113 scope.go:117] "RemoveContainer" containerID="2a77bc73ca6aa15ba01f8eed9047540dce82159b1a12908d2ae12b4a4fb034c4" Jan 30 00:14:04 crc kubenswrapper[5113]: E0130 00:14:04.933447 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oauth-openshift\" with CrashLoopBackOff: \"back-off 10s restarting failed container=oauth-openshift pod=oauth-openshift-576bd486d8-68jgg_openshift-authentication(3bde0ed2-71a3-4281-b21d-af61bdb778ef)\"" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" podUID="3bde0ed2-71a3-4281-b21d-af61bdb778ef" Jan 30 00:14:05 crc kubenswrapper[5113]: I0130 00:14:05.280558 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-network-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:14:05 crc kubenswrapper[5113]: I0130 00:14:05.772952 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-oauth-apiserver\"/\"oauth-apiserver-sa-dockercfg-qqw4z\"" Jan 30 00:14:06 crc kubenswrapper[5113]: I0130 00:14:06.811957 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq"] Jan 30 00:14:06 crc kubenswrapper[5113]: I0130 00:14:06.812807 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" podUID="62ca7dac-0e59-4c7e-9781-2a06507294a0" containerName="controller-manager" containerID="cri-o://1f61523e196bba5340f11e1ab683537f2b5ced12d33d3f08c8b95f9288d3ee8d" gracePeriod=30 Jan 30 00:14:06 crc kubenswrapper[5113]: I0130 00:14:06.821725 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs"] Jan 30 00:14:06 crc kubenswrapper[5113]: I0130 00:14:06.822088 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" podUID="f5f80b8d-d118-4c70-bf23-bd56e888bb3b" containerName="route-controller-manager" containerID="cri-o://4bc3b7d3c498c659e3fd157c3c91255382792c444d97cba2440670332341814f" gracePeriod=30 Jan 30 00:14:06 crc kubenswrapper[5113]: I0130 00:14:06.953222 5113 generic.go:358] "Generic (PLEG): container finished" podID="f5f80b8d-d118-4c70-bf23-bd56e888bb3b" containerID="4bc3b7d3c498c659e3fd157c3c91255382792c444d97cba2440670332341814f" exitCode=0 Jan 30 00:14:06 crc kubenswrapper[5113]: I0130 00:14:06.953349 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" event={"ID":"f5f80b8d-d118-4c70-bf23-bd56e888bb3b","Type":"ContainerDied","Data":"4bc3b7d3c498c659e3fd157c3c91255382792c444d97cba2440670332341814f"} Jan 30 00:14:06 crc kubenswrapper[5113]: I0130 00:14:06.956065 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f7dbc7e1ee9c187a863ef9b473fad27b/startup-monitor/0.log" Jan 30 00:14:06 crc kubenswrapper[5113]: I0130 00:14:06.956100 5113 generic.go:358] "Generic (PLEG): container finished" podID="f7dbc7e1ee9c187a863ef9b473fad27b" 
containerID="c50078fe466ad595a60e90b7e6487c2280d765dfaac1c517b5804bcfe6c080e2" exitCode=137 Jan 30 00:14:06 crc kubenswrapper[5113]: I0130 00:14:06.958237 5113 generic.go:358] "Generic (PLEG): container finished" podID="62ca7dac-0e59-4c7e-9781-2a06507294a0" containerID="1f61523e196bba5340f11e1ab683537f2b5ced12d33d3f08c8b95f9288d3ee8d" exitCode=0 Jan 30 00:14:06 crc kubenswrapper[5113]: I0130 00:14:06.958716 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" event={"ID":"62ca7dac-0e59-4c7e-9781-2a06507294a0","Type":"ContainerDied","Data":"1f61523e196bba5340f11e1ab683537f2b5ced12d33d3f08c8b95f9288d3ee8d"} Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.069372 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f7dbc7e1ee9c187a863ef9b473fad27b/startup-monitor/0.log" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.069561 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.071162 5113 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="pods \"kube-apiserver-startup-monitor-crc\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-kube-apiserver\": no relationship found between node 'crc' and this object" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138145 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock\") pod \"f7dbc7e1ee9c187a863ef9b473fad27b\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138282 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock" (OuterVolumeSpecName: "var-lock") pod "f7dbc7e1ee9c187a863ef9b473fad27b" (UID: "f7dbc7e1ee9c187a863ef9b473fad27b"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138309 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log\") pod \"f7dbc7e1ee9c187a863ef9b473fad27b\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138354 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir\") pod \"f7dbc7e1ee9c187a863ef9b473fad27b\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138380 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests\") pod \"f7dbc7e1ee9c187a863ef9b473fad27b\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138387 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log" (OuterVolumeSpecName: "var-log") pod "f7dbc7e1ee9c187a863ef9b473fad27b" (UID: "f7dbc7e1ee9c187a863ef9b473fad27b"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138407 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f7dbc7e1ee9c187a863ef9b473fad27b" (UID: "f7dbc7e1ee9c187a863ef9b473fad27b"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138414 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir\") pod \"f7dbc7e1ee9c187a863ef9b473fad27b\" (UID: \"f7dbc7e1ee9c187a863ef9b473fad27b\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138428 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests" (OuterVolumeSpecName: "manifests") pod "f7dbc7e1ee9c187a863ef9b473fad27b" (UID: "f7dbc7e1ee9c187a863ef9b473fad27b"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138591 5113 reconciler_common.go:299] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-log\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138604 5113 reconciler_common.go:299] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138617 5113 reconciler_common.go:299] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-manifests\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.138624 5113 reconciler_common.go:299] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-var-lock\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.154639 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f7dbc7e1ee9c187a863ef9b473fad27b" (UID: "f7dbc7e1ee9c187a863ef9b473fad27b"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.187980 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.208539 5113 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="pods \"kube-apiserver-startup-monitor-crc\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-kube-apiserver\": no relationship found between node 'crc' and this object" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.231157 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx"] Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.231781 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.231896 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" containerName="startup-monitor" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.231914 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" containerName="startup-monitor" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.231931 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f5f80b8d-d118-4c70-bf23-bd56e888bb3b" containerName="route-controller-manager" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.231939 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5f80b8d-d118-4c70-bf23-bd56e888bb3b" containerName="route-controller-manager" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.232051 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="f5f80b8d-d118-4c70-bf23-bd56e888bb3b" containerName="route-controller-manager" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.232069 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" containerName="startup-monitor" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.232082 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="62ca7dac-0e59-4c7e-9781-2a06507294a0" containerName="controller-manager" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.236774 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.237282 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx"] Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.239756 5113 reconciler_common.go:299] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f7dbc7e1ee9c187a863ef9b473fad27b-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.264356 5113 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="pods \"kube-apiserver-startup-monitor-crc\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-kube-apiserver\": no relationship found between node 'crc' and this object" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.285573 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb"] Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.285750 5113 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="pods \"kube-apiserver-startup-monitor-crc\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-kube-apiserver\": no relationship found between node 'crc' and this object" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.286496 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="62ca7dac-0e59-4c7e-9781-2a06507294a0" 
containerName="controller-manager" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.286511 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="62ca7dac-0e59-4c7e-9781-2a06507294a0" containerName="controller-manager" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.292685 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb"] Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.292920 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.328088 5113 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="pods \"kube-apiserver-startup-monitor-crc\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-kube-apiserver\": no relationship found between node 'crc' and this object" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.340296 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-config\") pod \"62ca7dac-0e59-4c7e-9781-2a06507294a0\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.340377 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-client-ca\") pod \"62ca7dac-0e59-4c7e-9781-2a06507294a0\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.340399 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-client-ca\") pod \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.340459 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/62ca7dac-0e59-4c7e-9781-2a06507294a0-tmp\") pod \"62ca7dac-0e59-4c7e-9781-2a06507294a0\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.340497 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-proxy-ca-bundles\") pod \"62ca7dac-0e59-4c7e-9781-2a06507294a0\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.340560 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8bgv\" (UniqueName: \"kubernetes.io/projected/62ca7dac-0e59-4c7e-9781-2a06507294a0-kube-api-access-t8bgv\") pod \"62ca7dac-0e59-4c7e-9781-2a06507294a0\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.340627 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-tmp\") pod \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " Jan 30 00:14:07 crc 
kubenswrapper[5113]: I0130 00:14:07.340646 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62ca7dac-0e59-4c7e-9781-2a06507294a0-serving-cert\") pod \"62ca7dac-0e59-4c7e-9781-2a06507294a0\" (UID: \"62ca7dac-0e59-4c7e-9781-2a06507294a0\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.340681 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ndmzf\" (UniqueName: \"kubernetes.io/projected/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-kube-api-access-ndmzf\") pod \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.341052 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-tmp" (OuterVolumeSpecName: "tmp") pod "f5f80b8d-d118-4c70-bf23-bd56e888bb3b" (UID: "f5f80b8d-d118-4c70-bf23-bd56e888bb3b"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.341148 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62ca7dac-0e59-4c7e-9781-2a06507294a0-tmp" (OuterVolumeSpecName: "tmp") pod "62ca7dac-0e59-4c7e-9781-2a06507294a0" (UID: "62ca7dac-0e59-4c7e-9781-2a06507294a0"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.341194 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-config\") pod \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.341375 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-client-ca" (OuterVolumeSpecName: "client-ca") pod "f5f80b8d-d118-4c70-bf23-bd56e888bb3b" (UID: "f5f80b8d-d118-4c70-bf23-bd56e888bb3b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.341411 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-serving-cert\") pod \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\" (UID: \"f5f80b8d-d118-4c70-bf23-bd56e888bb3b\") " Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.341422 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-client-ca" (OuterVolumeSpecName: "client-ca") pod "62ca7dac-0e59-4c7e-9781-2a06507294a0" (UID: "62ca7dac-0e59-4c7e-9781-2a06507294a0"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.341760 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-config" (OuterVolumeSpecName: "config") pod "62ca7dac-0e59-4c7e-9781-2a06507294a0" (UID: "62ca7dac-0e59-4c7e-9781-2a06507294a0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.341892 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7d5d1a0-e44f-4294-a738-bcf80c68e552-serving-cert\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.341998 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-config" (OuterVolumeSpecName: "config") pod "f5f80b8d-d118-4c70-bf23-bd56e888bb3b" (UID: "f5f80b8d-d118-4c70-bf23-bd56e888bb3b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.342016 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-client-ca\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.342122 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-config\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.342193 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a7d5d1a0-e44f-4294-a738-bcf80c68e552-tmp\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.342317 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z9v2\" (UniqueName: \"kubernetes.io/projected/a7d5d1a0-e44f-4294-a738-bcf80c68e552-kube-api-access-5z9v2\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.342434 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/62ca7dac-0e59-4c7e-9781-2a06507294a0-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.342466 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.342484 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.342502 5113 
reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.342550 5113 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.342567 5113 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.342899 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "62ca7dac-0e59-4c7e-9781-2a06507294a0" (UID: "62ca7dac-0e59-4c7e-9781-2a06507294a0"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.344189 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-kube-api-access-ndmzf" (OuterVolumeSpecName: "kube-api-access-ndmzf") pod "f5f80b8d-d118-4c70-bf23-bd56e888bb3b" (UID: "f5f80b8d-d118-4c70-bf23-bd56e888bb3b"). InnerVolumeSpecName "kube-api-access-ndmzf". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.345384 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62ca7dac-0e59-4c7e-9781-2a06507294a0-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "62ca7dac-0e59-4c7e-9781-2a06507294a0" (UID: "62ca7dac-0e59-4c7e-9781-2a06507294a0"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.345706 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62ca7dac-0e59-4c7e-9781-2a06507294a0-kube-api-access-t8bgv" (OuterVolumeSpecName: "kube-api-access-t8bgv") pod "62ca7dac-0e59-4c7e-9781-2a06507294a0" (UID: "62ca7dac-0e59-4c7e-9781-2a06507294a0"). InnerVolumeSpecName "kube-api-access-t8bgv". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.345719 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f5f80b8d-d118-4c70-bf23-bd56e888bb3b" (UID: "f5f80b8d-d118-4c70-bf23-bd56e888bb3b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.444241 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a7d5d1a0-e44f-4294-a738-bcf80c68e552-tmp\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.444333 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m46j\" (UniqueName: \"kubernetes.io/projected/b01842e8-bbf7-4552-ad63-05d286805307-kube-api-access-7m46j\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.444377 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/b01842e8-bbf7-4552-ad63-05d286805307-tmp\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.444418 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-client-ca\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.444474 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-5z9v2\" (UniqueName: \"kubernetes.io/projected/a7d5d1a0-e44f-4294-a738-bcf80c68e552-kube-api-access-5z9v2\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.444591 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-config\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.444638 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7d5d1a0-e44f-4294-a738-bcf80c68e552-serving-cert\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.444684 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-proxy-ca-bundles\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc 
kubenswrapper[5113]: I0130 00:14:07.445475 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a7d5d1a0-e44f-4294-a738-bcf80c68e552-tmp\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.445556 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-client-ca\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.445635 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-config\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.445676 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b01842e8-bbf7-4552-ad63-05d286805307-serving-cert\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.445778 5113 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/62ca7dac-0e59-4c7e-9781-2a06507294a0-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.445803 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-t8bgv\" (UniqueName: \"kubernetes.io/projected/62ca7dac-0e59-4c7e-9781-2a06507294a0-kube-api-access-t8bgv\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.445831 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/62ca7dac-0e59-4c7e-9781-2a06507294a0-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.445855 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-ndmzf\" (UniqueName: \"kubernetes.io/projected/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-kube-api-access-ndmzf\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.445883 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5f80b8d-d118-4c70-bf23-bd56e888bb3b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.446901 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-client-ca\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.447119 5113 operation_generator.go:615] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-config\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.451113 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7d5d1a0-e44f-4294-a738-bcf80c68e552-serving-cert\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.464507 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z9v2\" (UniqueName: \"kubernetes.io/projected/a7d5d1a0-e44f-4294-a738-bcf80c68e552-kube-api-access-5z9v2\") pod \"route-controller-manager-8647b66944-85ghx\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.547796 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-proxy-ca-bundles\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.548517 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b01842e8-bbf7-4552-ad63-05d286805307-serving-cert\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.548701 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-7m46j\" (UniqueName: \"kubernetes.io/projected/b01842e8-bbf7-4552-ad63-05d286805307-kube-api-access-7m46j\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.548751 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/b01842e8-bbf7-4552-ad63-05d286805307-tmp\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.548794 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-client-ca\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.548907 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-config\") pod 
\"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.550397 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/b01842e8-bbf7-4552-ad63-05d286805307-tmp\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.551211 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-proxy-ca-bundles\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.552239 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-client-ca\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.556136 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-config\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.556345 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b01842e8-bbf7-4552-ad63-05d286805307-serving-cert\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.569045 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.586182 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-7m46j\" (UniqueName: \"kubernetes.io/projected/b01842e8-bbf7-4552-ad63-05d286805307-kube-api-access-7m46j\") pod \"controller-manager-5fbc6c9c4d-x4sqb\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.607458 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.910705 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx"] Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.948902 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb"] Jan 30 00:14:07 crc kubenswrapper[5113]: W0130 00:14:07.950753 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb01842e8_bbf7_4552_ad63_05d286805307.slice/crio-544f8da4d0f09f71a139d1510d361587d0d87e26be6854d795a117a867625f4f WatchSource:0}: Error finding container 544f8da4d0f09f71a139d1510d361587d0d87e26be6854d795a117a867625f4f: Status 404 returned error can't find the container with id 544f8da4d0f09f71a139d1510d361587d0d87e26be6854d795a117a867625f4f Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.964562 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" event={"ID":"a7d5d1a0-e44f-4294-a738-bcf80c68e552","Type":"ContainerStarted","Data":"2ddfb0c65b779b395a54b4891214dce2331e812421a962bc97e6cf4f0cf58346"} Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.965514 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" event={"ID":"b01842e8-bbf7-4552-ad63-05d286805307","Type":"ContainerStarted","Data":"544f8da4d0f09f71a139d1510d361587d0d87e26be6854d795a117a867625f4f"} Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.967113 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" event={"ID":"f5f80b8d-d118-4c70-bf23-bd56e888bb3b","Type":"ContainerDied","Data":"ebeba753f12123dfbda50c77451e4c4a68ec86e44d2e6bda1266e7334d6c2107"} Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.967171 5113 scope.go:117] "RemoveContainer" containerID="4bc3b7d3c498c659e3fd157c3c91255382792c444d97cba2440670332341814f" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.967246 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.968441 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f7dbc7e1ee9c187a863ef9b473fad27b/startup-monitor/0.log" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.968587 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.971248 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" event={"ID":"62ca7dac-0e59-4c7e-9781-2a06507294a0","Type":"ContainerDied","Data":"b35dfcb8946f8c7dcb504a3f71b116b48e19f3ae5d0f308fbcef0efec04e7491"} Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.971474 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.972315 5113 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="pods \"kube-apiserver-startup-monitor-crc\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-kube-apiserver\": no relationship found between node 'crc' and this object" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.974641 5113 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="pods \"kube-apiserver-startup-monitor-crc\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-kube-apiserver\": no relationship found between node 'crc' and this object" Jan 30 00:14:07 crc kubenswrapper[5113]: I0130 00:14:07.990124 5113 scope.go:117] "RemoveContainer" containerID="c50078fe466ad595a60e90b7e6487c2280d765dfaac1c517b5804bcfe6c080e2" Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.001008 5113 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="pods \"kube-apiserver-startup-monitor-crc\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-kube-apiserver\": no relationship found between node 'crc' and this object" Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.019913 5113 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="pods \"kube-apiserver-startup-monitor-crc\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-kube-apiserver\": no relationship found between node 'crc' and this object" Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.022196 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq"] Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.023158 5113 scope.go:117] "RemoveContainer" containerID="1f61523e196bba5340f11e1ab683537f2b5ced12d33d3f08c8b95f9288d3ee8d" Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.025741 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-c9d77bb8f-rbfjq"] Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.043654 5113 status_manager.go:895] "Failed to get status for pod" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="pods \"kube-apiserver-startup-monitor-crc\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-kube-apiserver\": no relationship found between node 'crc' and this object" Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.048312 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs"] Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.052093 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-65d7f4859c-5kgzs"] Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.782266 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62ca7dac-0e59-4c7e-9781-2a06507294a0" path="/var/lib/kubelet/pods/62ca7dac-0e59-4c7e-9781-2a06507294a0/volumes" Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.783797 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5f80b8d-d118-4c70-bf23-bd56e888bb3b" path="/var/lib/kubelet/pods/f5f80b8d-d118-4c70-bf23-bd56e888bb3b/volumes" Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.784423 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7dbc7e1ee9c187a863ef9b473fad27b" path="/var/lib/kubelet/pods/f7dbc7e1ee9c187a863ef9b473fad27b/volumes" Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.979428 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" event={"ID":"b01842e8-bbf7-4552-ad63-05d286805307","Type":"ContainerStarted","Data":"ba0abb06c0fa87c2861585ce87a395d904382db3e2009a55c970f8d6525ba863"} Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.980599 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.987017 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" event={"ID":"a7d5d1a0-e44f-4294-a738-bcf80c68e552","Type":"ContainerStarted","Data":"5a7b2bd3e7ac928581fd8132587cdee882fa349507ffbb671a7ee70ef72f8e79"} Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.987263 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.987875 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:08 crc kubenswrapper[5113]: I0130 00:14:08.992344 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:09 crc kubenswrapper[5113]: I0130 00:14:09.004068 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" podStartSLOduration=3.004045264 podStartE2EDuration="3.004045264s" podCreationTimestamp="2026-01-30 00:14:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:14:08.999997428 +0000 UTC m=+269.072602805" watchObservedRunningTime="2026-01-30 00:14:09.004045264 +0000 UTC m=+269.076650641" Jan 30 00:14:09 crc kubenswrapper[5113]: I0130 00:14:09.028657 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" podStartSLOduration=3.028629545 podStartE2EDuration="3.028629545s" podCreationTimestamp="2026-01-30 00:14:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:14:09.019186372 +0000 UTC m=+269.091791749" watchObservedRunningTime="2026-01-30 
00:14:09.028629545 +0000 UTC m=+269.101234922" Jan 30 00:14:10 crc kubenswrapper[5113]: I0130 00:14:10.961043 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:10 crc kubenswrapper[5113]: I0130 00:14:10.962162 5113 scope.go:117] "RemoveContainer" containerID="2a77bc73ca6aa15ba01f8eed9047540dce82159b1a12908d2ae12b4a4fb034c4" Jan 30 00:14:10 crc kubenswrapper[5113]: E0130 00:14:10.962425 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oauth-openshift\" with CrashLoopBackOff: \"back-off 10s restarting failed container=oauth-openshift pod=oauth-openshift-576bd486d8-68jgg_openshift-authentication(3bde0ed2-71a3-4281-b21d-af61bdb778ef)\"" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" podUID="3bde0ed2-71a3-4281-b21d-af61bdb778ef" Jan 30 00:14:11 crc kubenswrapper[5113]: I0130 00:14:11.747554 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"packageserver-service-cert\"" Jan 30 00:14:11 crc kubenswrapper[5113]: I0130 00:14:11.893167 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:11 crc kubenswrapper[5113]: I0130 00:14:11.894394 5113 scope.go:117] "RemoveContainer" containerID="2a77bc73ca6aa15ba01f8eed9047540dce82159b1a12908d2ae12b4a4fb034c4" Jan 30 00:14:11 crc kubenswrapper[5113]: E0130 00:14:11.894804 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"oauth-openshift\" with CrashLoopBackOff: \"back-off 10s restarting failed container=oauth-openshift pod=oauth-openshift-576bd486d8-68jgg_openshift-authentication(3bde0ed2-71a3-4281-b21d-af61bdb778ef)\"" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" podUID="3bde0ed2-71a3-4281-b21d-af61bdb778ef" Jan 30 00:14:15 crc kubenswrapper[5113]: I0130 00:14:15.394438 5113 ???:1] "http: TLS handshake error from 192.168.126.11:60928: no serving certificate available for the kubelet" Jan 30 00:14:21 crc kubenswrapper[5113]: I0130 00:14:21.195885 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:14:21 crc kubenswrapper[5113]: I0130 00:14:21.196505 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:14:21 crc kubenswrapper[5113]: I0130 00:14:21.196626 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:14:21 crc kubenswrapper[5113]: I0130 00:14:21.197578 5113 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3448c37a3f80491c2d3cfa4d86f18abd9731d5d8a7722c07abbbacc4c6189249"} pod="openshift-machine-config-operator/machine-config-daemon-gxph5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 00:14:21 
crc kubenswrapper[5113]: I0130 00:14:21.197641 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" containerID="cri-o://3448c37a3f80491c2d3cfa4d86f18abd9731d5d8a7722c07abbbacc4c6189249" gracePeriod=600 Jan 30 00:14:22 crc kubenswrapper[5113]: I0130 00:14:22.081471 5113 generic.go:358] "Generic (PLEG): container finished" podID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerID="3448c37a3f80491c2d3cfa4d86f18abd9731d5d8a7722c07abbbacc4c6189249" exitCode=0 Jan 30 00:14:22 crc kubenswrapper[5113]: I0130 00:14:22.081604 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerDied","Data":"3448c37a3f80491c2d3cfa4d86f18abd9731d5d8a7722c07abbbacc4c6189249"} Jan 30 00:14:22 crc kubenswrapper[5113]: I0130 00:14:22.082620 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerStarted","Data":"d2c027b2bad57e7da7ce244d6807304cf78d526f9fcc9b86a42e7865e2bae0e7"} Jan 30 00:14:22 crc kubenswrapper[5113]: I0130 00:14:22.084310 5113 generic.go:358] "Generic (PLEG): container finished" podID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" containerID="567bebbbe1416f08b7f812e6bca9ea03e75327b2ea279303897af85a298d51bb" exitCode=0 Jan 30 00:14:22 crc kubenswrapper[5113]: I0130 00:14:22.084343 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" event={"ID":"f7e91b03-5282-4f6e-8ed2-a44afa3fc350","Type":"ContainerDied","Data":"567bebbbe1416f08b7f812e6bca9ea03e75327b2ea279303897af85a298d51bb"} Jan 30 00:14:22 crc kubenswrapper[5113]: I0130 00:14:22.084804 5113 scope.go:117] "RemoveContainer" containerID="567bebbbe1416f08b7f812e6bca9ea03e75327b2ea279303897af85a298d51bb" Jan 30 00:14:22 crc kubenswrapper[5113]: I0130 00:14:22.640009 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-multus\"/\"multus-admission-controller-secret\"" Jan 30 00:14:23 crc kubenswrapper[5113]: I0130 00:14:23.096921 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" event={"ID":"f7e91b03-5282-4f6e-8ed2-a44afa3fc350","Type":"ContainerStarted","Data":"eb82d61e9d30b32d41beb3437db5fc66e5f4749f7aa3e2448a0f58cd81714364"} Jan 30 00:14:23 crc kubenswrapper[5113]: I0130 00:14:23.097706 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:14:23 crc kubenswrapper[5113]: I0130 00:14:23.101416 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:14:24 crc kubenswrapper[5113]: I0130 00:14:24.773259 5113 scope.go:117] "RemoveContainer" containerID="2a77bc73ca6aa15ba01f8eed9047540dce82159b1a12908d2ae12b4a4fb034c4" Jan 30 00:14:25 crc kubenswrapper[5113]: I0130 00:14:25.114075 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log" Jan 30 00:14:26 crc kubenswrapper[5113]: I0130 00:14:26.126234 5113 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log" Jan 30 00:14:26 crc kubenswrapper[5113]: I0130 00:14:26.126431 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" event={"ID":"3bde0ed2-71a3-4281-b21d-af61bdb778ef","Type":"ContainerStarted","Data":"66795a08772714f033b425766f05b48c7f56d3a5e904a8ab4ea288dc559c817e"} Jan 30 00:14:26 crc kubenswrapper[5113]: I0130 00:14:26.127077 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:26 crc kubenswrapper[5113]: I0130 00:14:26.135515 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-576bd486d8-68jgg" Jan 30 00:14:26 crc kubenswrapper[5113]: I0130 00:14:26.761933 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb"] Jan 30 00:14:26 crc kubenswrapper[5113]: I0130 00:14:26.762606 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" podUID="b01842e8-bbf7-4552-ad63-05d286805307" containerName="controller-manager" containerID="cri-o://ba0abb06c0fa87c2861585ce87a395d904382db3e2009a55c970f8d6525ba863" gracePeriod=30 Jan 30 00:14:26 crc kubenswrapper[5113]: I0130 00:14:26.768086 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx"] Jan 30 00:14:26 crc kubenswrapper[5113]: I0130 00:14:26.768503 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" podUID="a7d5d1a0-e44f-4294-a738-bcf80c68e552" containerName="route-controller-manager" containerID="cri-o://5a7b2bd3e7ac928581fd8132587cdee882fa349507ffbb671a7ee70ef72f8e79" gracePeriod=30 Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.134217 5113 generic.go:358] "Generic (PLEG): container finished" podID="a7d5d1a0-e44f-4294-a738-bcf80c68e552" containerID="5a7b2bd3e7ac928581fd8132587cdee882fa349507ffbb671a7ee70ef72f8e79" exitCode=0 Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.134472 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" event={"ID":"a7d5d1a0-e44f-4294-a738-bcf80c68e552","Type":"ContainerDied","Data":"5a7b2bd3e7ac928581fd8132587cdee882fa349507ffbb671a7ee70ef72f8e79"} Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.136161 5113 generic.go:358] "Generic (PLEG): container finished" podID="b01842e8-bbf7-4552-ad63-05d286805307" containerID="ba0abb06c0fa87c2861585ce87a395d904382db3e2009a55c970f8d6525ba863" exitCode=0 Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.137222 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" event={"ID":"b01842e8-bbf7-4552-ad63-05d286805307","Type":"ContainerDied","Data":"ba0abb06c0fa87c2861585ce87a395d904382db3e2009a55c970f8d6525ba863"} Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.286333 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.323025 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc"] Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.323735 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="a7d5d1a0-e44f-4294-a738-bcf80c68e552" containerName="route-controller-manager" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.323757 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d5d1a0-e44f-4294-a738-bcf80c68e552" containerName="route-controller-manager" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.323865 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="a7d5d1a0-e44f-4294-a738-bcf80c68e552" containerName="route-controller-manager" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.333259 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.350406 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc"] Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.472189 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5z9v2\" (UniqueName: \"kubernetes.io/projected/a7d5d1a0-e44f-4294-a738-bcf80c68e552-kube-api-access-5z9v2\") pod \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.472375 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7d5d1a0-e44f-4294-a738-bcf80c68e552-serving-cert\") pod \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.472415 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a7d5d1a0-e44f-4294-a738-bcf80c68e552-tmp\") pod \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.472461 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-client-ca\") pod \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.472537 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-config\") pod \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\" (UID: \"a7d5d1a0-e44f-4294-a738-bcf80c68e552\") " Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.472823 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-config\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 
00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.472954 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-client-ca\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.473120 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7d5d1a0-e44f-4294-a738-bcf80c68e552-tmp" (OuterVolumeSpecName: "tmp") pod "a7d5d1a0-e44f-4294-a738-bcf80c68e552" (UID: "a7d5d1a0-e44f-4294-a738-bcf80c68e552"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.473181 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qmjk\" (UniqueName: \"kubernetes.io/projected/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-kube-api-access-2qmjk\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.473336 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-tmp\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.473398 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-serving-cert\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.473549 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/a7d5d1a0-e44f-4294-a738-bcf80c68e552-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.473690 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-client-ca" (OuterVolumeSpecName: "client-ca") pod "a7d5d1a0-e44f-4294-a738-bcf80c68e552" (UID: "a7d5d1a0-e44f-4294-a738-bcf80c68e552"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.473818 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-config" (OuterVolumeSpecName: "config") pod "a7d5d1a0-e44f-4294-a738-bcf80c68e552" (UID: "a7d5d1a0-e44f-4294-a738-bcf80c68e552"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.481958 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7d5d1a0-e44f-4294-a738-bcf80c68e552-kube-api-access-5z9v2" (OuterVolumeSpecName: "kube-api-access-5z9v2") pod "a7d5d1a0-e44f-4294-a738-bcf80c68e552" (UID: "a7d5d1a0-e44f-4294-a738-bcf80c68e552"). InnerVolumeSpecName "kube-api-access-5z9v2". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.484405 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7d5d1a0-e44f-4294-a738-bcf80c68e552-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a7d5d1a0-e44f-4294-a738-bcf80c68e552" (UID: "a7d5d1a0-e44f-4294-a738-bcf80c68e552"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.547942 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.574899 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-tmp\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.574959 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-serving-cert\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.575011 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-config\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.575031 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-client-ca\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.575082 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-2qmjk\" (UniqueName: \"kubernetes.io/projected/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-kube-api-access-2qmjk\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.575131 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a7d5d1a0-e44f-4294-a738-bcf80c68e552-serving-cert\") on 
node \"crc\" DevicePath \"\"" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.575147 5113 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.575160 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7d5d1a0-e44f-4294-a738-bcf80c68e552-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.575173 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-5z9v2\" (UniqueName: \"kubernetes.io/projected/a7d5d1a0-e44f-4294-a738-bcf80c68e552-kube-api-access-5z9v2\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.576871 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-config\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.577618 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-client-ca\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.578038 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-tmp\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.579896 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-serving-cert\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.619793 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-8697b7fb76-f52js"] Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.620543 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b01842e8-bbf7-4552-ad63-05d286805307" containerName="controller-manager" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.620567 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="b01842e8-bbf7-4552-ad63-05d286805307" containerName="controller-manager" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.620689 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="b01842e8-bbf7-4552-ad63-05d286805307" containerName="controller-manager" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.631953 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.635330 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8697b7fb76-f52js"] Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.635830 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qmjk\" (UniqueName: \"kubernetes.io/projected/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-kube-api-access-2qmjk\") pod \"route-controller-manager-5d697d675d-z4jdc\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.656436 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.676274 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-config\") pod \"b01842e8-bbf7-4552-ad63-05d286805307\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.676371 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-client-ca\") pod \"b01842e8-bbf7-4552-ad63-05d286805307\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.676484 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b01842e8-bbf7-4552-ad63-05d286805307-serving-cert\") pod \"b01842e8-bbf7-4552-ad63-05d286805307\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.676558 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-proxy-ca-bundles\") pod \"b01842e8-bbf7-4552-ad63-05d286805307\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.676760 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7m46j\" (UniqueName: \"kubernetes.io/projected/b01842e8-bbf7-4552-ad63-05d286805307-kube-api-access-7m46j\") pod \"b01842e8-bbf7-4552-ad63-05d286805307\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.676779 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/b01842e8-bbf7-4552-ad63-05d286805307-tmp\") pod \"b01842e8-bbf7-4552-ad63-05d286805307\" (UID: \"b01842e8-bbf7-4552-ad63-05d286805307\") " Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.677797 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b01842e8-bbf7-4552-ad63-05d286805307-tmp" (OuterVolumeSpecName: "tmp") pod "b01842e8-bbf7-4552-ad63-05d286805307" (UID: "b01842e8-bbf7-4552-ad63-05d286805307"). InnerVolumeSpecName "tmp". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.678408 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "b01842e8-bbf7-4552-ad63-05d286805307" (UID: "b01842e8-bbf7-4552-ad63-05d286805307"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.678579 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-client-ca" (OuterVolumeSpecName: "client-ca") pod "b01842e8-bbf7-4552-ad63-05d286805307" (UID: "b01842e8-bbf7-4552-ad63-05d286805307"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.680031 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-config" (OuterVolumeSpecName: "config") pod "b01842e8-bbf7-4552-ad63-05d286805307" (UID: "b01842e8-bbf7-4552-ad63-05d286805307"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.681734 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b01842e8-bbf7-4552-ad63-05d286805307-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b01842e8-bbf7-4552-ad63-05d286805307" (UID: "b01842e8-bbf7-4552-ad63-05d286805307"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.681940 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b01842e8-bbf7-4552-ad63-05d286805307-kube-api-access-7m46j" (OuterVolumeSpecName: "kube-api-access-7m46j") pod "b01842e8-bbf7-4552-ad63-05d286805307" (UID: "b01842e8-bbf7-4552-ad63-05d286805307"). InnerVolumeSpecName "kube-api-access-7m46j". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778501 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-proxy-ca-bundles\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778566 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-config\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778626 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqkwx\" (UniqueName: \"kubernetes.io/projected/041afcdf-8351-4a75-b316-bac199ef8240-kube-api-access-bqkwx\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778644 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/041afcdf-8351-4a75-b316-bac199ef8240-serving-cert\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778665 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/041afcdf-8351-4a75-b316-bac199ef8240-tmp\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778699 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-client-ca\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778749 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-7m46j\" (UniqueName: \"kubernetes.io/projected/b01842e8-bbf7-4552-ad63-05d286805307-kube-api-access-7m46j\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778762 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/b01842e8-bbf7-4552-ad63-05d286805307-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778772 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778782 5113 reconciler_common.go:299] "Volume 
detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778791 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b01842e8-bbf7-4552-ad63-05d286805307-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.778800 5113 reconciler_common.go:299] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b01842e8-bbf7-4552-ad63-05d286805307-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.880039 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-proxy-ca-bundles\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.880122 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-config\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.880227 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-bqkwx\" (UniqueName: \"kubernetes.io/projected/041afcdf-8351-4a75-b316-bac199ef8240-kube-api-access-bqkwx\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.880254 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/041afcdf-8351-4a75-b316-bac199ef8240-serving-cert\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.880286 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/041afcdf-8351-4a75-b316-bac199ef8240-tmp\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.880328 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-client-ca\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.881468 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-client-ca\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " 
pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.882498 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-proxy-ca-bundles\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.882994 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/041afcdf-8351-4a75-b316-bac199ef8240-tmp\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.883544 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-config\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.887726 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/041afcdf-8351-4a75-b316-bac199ef8240-serving-cert\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.900957 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqkwx\" (UniqueName: \"kubernetes.io/projected/041afcdf-8351-4a75-b316-bac199ef8240-kube-api-access-bqkwx\") pod \"controller-manager-8697b7fb76-f52js\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:27 crc kubenswrapper[5113]: I0130 00:14:27.954588 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.083647 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc"] Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.146567 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.146636 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb" event={"ID":"b01842e8-bbf7-4552-ad63-05d286805307","Type":"ContainerDied","Data":"544f8da4d0f09f71a139d1510d361587d0d87e26be6854d795a117a867625f4f"} Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.146712 5113 scope.go:117] "RemoveContainer" containerID="ba0abb06c0fa87c2861585ce87a395d904382db3e2009a55c970f8d6525ba863" Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.150269 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" event={"ID":"d5bd7cbd-4a52-4e40-8d87-72db36f52f14","Type":"ContainerStarted","Data":"3a232b016eb3994823da17d717253098cd0350e5248ca2abb861ab3c9cd0375a"} Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.155472 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" event={"ID":"a7d5d1a0-e44f-4294-a738-bcf80c68e552","Type":"ContainerDied","Data":"2ddfb0c65b779b395a54b4891214dce2331e812421a962bc97e6cf4f0cf58346"} Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.155589 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx" Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.177106 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8697b7fb76-f52js"] Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.190038 5113 scope.go:117] "RemoveContainer" containerID="5a7b2bd3e7ac928581fd8132587cdee882fa349507ffbb671a7ee70ef72f8e79" Jan 30 00:14:28 crc kubenswrapper[5113]: W0130 00:14:28.196936 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod041afcdf_8351_4a75_b316_bac199ef8240.slice/crio-52f329e5d5c1feaa57908165b5d0a341fa0d658704c28fc1755d47f4a2931de3 WatchSource:0}: Error finding container 52f329e5d5c1feaa57908165b5d0a341fa0d658704c28fc1755d47f4a2931de3: Status 404 returned error can't find the container with id 52f329e5d5c1feaa57908165b5d0a341fa0d658704c28fc1755d47f4a2931de3 Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.217955 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx"] Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.227839 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8647b66944-85ghx"] Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.232124 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb"] Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.235966 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5fbc6c9c4d-x4sqb"] Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.784360 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7d5d1a0-e44f-4294-a738-bcf80c68e552" path="/var/lib/kubelet/pods/a7d5d1a0-e44f-4294-a738-bcf80c68e552/volumes" Jan 30 00:14:28 crc kubenswrapper[5113]: I0130 00:14:28.785510 5113 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b01842e8-bbf7-4552-ad63-05d286805307" path="/var/lib/kubelet/pods/b01842e8-bbf7-4552-ad63-05d286805307/volumes" Jan 30 00:14:29 crc kubenswrapper[5113]: I0130 00:14:29.164829 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" event={"ID":"d5bd7cbd-4a52-4e40-8d87-72db36f52f14","Type":"ContainerStarted","Data":"644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e"} Jan 30 00:14:29 crc kubenswrapper[5113]: I0130 00:14:29.165297 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:29 crc kubenswrapper[5113]: I0130 00:14:29.167511 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" event={"ID":"041afcdf-8351-4a75-b316-bac199ef8240","Type":"ContainerStarted","Data":"9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c"} Jan 30 00:14:29 crc kubenswrapper[5113]: I0130 00:14:29.167786 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:29 crc kubenswrapper[5113]: I0130 00:14:29.167825 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" event={"ID":"041afcdf-8351-4a75-b316-bac199ef8240","Type":"ContainerStarted","Data":"52f329e5d5c1feaa57908165b5d0a341fa0d658704c28fc1755d47f4a2931de3"} Jan 30 00:14:29 crc kubenswrapper[5113]: I0130 00:14:29.173443 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:14:29 crc kubenswrapper[5113]: I0130 00:14:29.176288 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:14:29 crc kubenswrapper[5113]: I0130 00:14:29.222384 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" podStartSLOduration=3.222348154 podStartE2EDuration="3.222348154s" podCreationTimestamp="2026-01-30 00:14:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:14:29.195542964 +0000 UTC m=+289.268148341" watchObservedRunningTime="2026-01-30 00:14:29.222348154 +0000 UTC m=+289.294953591" Jan 30 00:14:29 crc kubenswrapper[5113]: I0130 00:14:29.222645 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" podStartSLOduration=3.222635553 podStartE2EDuration="3.222635553s" podCreationTimestamp="2026-01-30 00:14:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:14:29.215870944 +0000 UTC m=+289.288476361" watchObservedRunningTime="2026-01-30 00:14:29.222635553 +0000 UTC m=+289.295241000" Jan 30 00:14:29 crc kubenswrapper[5113]: I0130 00:14:29.676018 5113 ???:1] "http: TLS handshake error from 192.168.126.11:35808: no serving certificate available for the kubelet" Jan 30 00:14:30 crc kubenswrapper[5113]: I0130 00:14:30.046139 5113 reflector.go:430] "Caches 
populated" type="*v1.Secret" reflector="object-\"openshift-kube-storage-version-migrator-operator\"/\"serving-cert\"" Jan 30 00:14:35 crc kubenswrapper[5113]: I0130 00:14:35.589393 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-marketplace\"/\"openshift-service-ca.crt\"" Jan 30 00:14:37 crc kubenswrapper[5113]: I0130 00:14:37.964478 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-kube-storage-version-migrator\"/\"openshift-service-ca.crt\"" Jan 30 00:14:40 crc kubenswrapper[5113]: I0130 00:14:40.906075 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log" Jan 30 00:14:40 crc kubenswrapper[5113]: I0130 00:14:40.908803 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log" Jan 30 00:14:40 crc kubenswrapper[5113]: I0130 00:14:40.968499 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log" Jan 30 00:14:40 crc kubenswrapper[5113]: I0130 00:14:40.969599 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log" Jan 30 00:14:48 crc kubenswrapper[5113]: I0130 00:14:48.342052 5113 generic.go:358] "Generic (PLEG): container finished" podID="28e0ef1a-f823-4898-90a3-66c67c5f19eb" containerID="1ed56093ec350e5ae16ceb54a0bb322604ae288e2ad280a268a7994b7909d637" exitCode=0 Jan 30 00:14:48 crc kubenswrapper[5113]: I0130 00:14:48.342158 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29495520-b9gz9" event={"ID":"28e0ef1a-f823-4898-90a3-66c67c5f19eb","Type":"ContainerDied","Data":"1ed56093ec350e5ae16ceb54a0bb322604ae288e2ad280a268a7994b7909d637"} Jan 30 00:14:49 crc kubenswrapper[5113]: I0130 00:14:49.698728 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29495520-b9gz9" Jan 30 00:14:49 crc kubenswrapper[5113]: I0130 00:14:49.727606 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/28e0ef1a-f823-4898-90a3-66c67c5f19eb-serviceca\") pod \"28e0ef1a-f823-4898-90a3-66c67c5f19eb\" (UID: \"28e0ef1a-f823-4898-90a3-66c67c5f19eb\") " Jan 30 00:14:49 crc kubenswrapper[5113]: I0130 00:14:49.727725 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvbqt\" (UniqueName: \"kubernetes.io/projected/28e0ef1a-f823-4898-90a3-66c67c5f19eb-kube-api-access-nvbqt\") pod \"28e0ef1a-f823-4898-90a3-66c67c5f19eb\" (UID: \"28e0ef1a-f823-4898-90a3-66c67c5f19eb\") " Jan 30 00:14:49 crc kubenswrapper[5113]: I0130 00:14:49.728685 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28e0ef1a-f823-4898-90a3-66c67c5f19eb-serviceca" (OuterVolumeSpecName: "serviceca") pod "28e0ef1a-f823-4898-90a3-66c67c5f19eb" (UID: "28e0ef1a-f823-4898-90a3-66c67c5f19eb"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:14:49 crc kubenswrapper[5113]: I0130 00:14:49.739107 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28e0ef1a-f823-4898-90a3-66c67c5f19eb-kube-api-access-nvbqt" (OuterVolumeSpecName: "kube-api-access-nvbqt") pod "28e0ef1a-f823-4898-90a3-66c67c5f19eb" (UID: "28e0ef1a-f823-4898-90a3-66c67c5f19eb"). InnerVolumeSpecName "kube-api-access-nvbqt". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:14:49 crc kubenswrapper[5113]: I0130 00:14:49.829288 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-nvbqt\" (UniqueName: \"kubernetes.io/projected/28e0ef1a-f823-4898-90a3-66c67c5f19eb-kube-api-access-nvbqt\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:49 crc kubenswrapper[5113]: I0130 00:14:49.829335 5113 reconciler_common.go:299] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/28e0ef1a-f823-4898-90a3-66c67c5f19eb-serviceca\") on node \"crc\" DevicePath \"\"" Jan 30 00:14:50 crc kubenswrapper[5113]: I0130 00:14:50.356046 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29495520-b9gz9" event={"ID":"28e0ef1a-f823-4898-90a3-66c67c5f19eb","Type":"ContainerDied","Data":"4858e0bc850dd7c0be02a0f21b6e24d5fc618404624f988ad4cb17ecbfbbbae8"} Jan 30 00:14:50 crc kubenswrapper[5113]: I0130 00:14:50.356118 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4858e0bc850dd7c0be02a0f21b6e24d5fc618404624f988ad4cb17ecbfbbbae8" Jan 30 00:14:50 crc kubenswrapper[5113]: I0130 00:14:50.356079 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29495520-b9gz9" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.197380 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8"] Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.199090 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="28e0ef1a-f823-4898-90a3-66c67c5f19eb" containerName="image-pruner" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.199113 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="28e0ef1a-f823-4898-90a3-66c67c5f19eb" containerName="image-pruner" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.199248 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="28e0ef1a-f823-4898-90a3-66c67c5f19eb" containerName="image-pruner" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.206589 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.211322 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-config\"" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.212708 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-dockercfg-vfqp6\"" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.222280 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8"] Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.300893 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwhrb\" (UniqueName: \"kubernetes.io/projected/0283c619-105f-42db-82a2-24b5f0088c04-kube-api-access-wwhrb\") pod \"collect-profiles-29495535-glbz8\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.300954 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0283c619-105f-42db-82a2-24b5f0088c04-secret-volume\") pod \"collect-profiles-29495535-glbz8\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.300989 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0283c619-105f-42db-82a2-24b5f0088c04-config-volume\") pod \"collect-profiles-29495535-glbz8\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.403276 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-wwhrb\" (UniqueName: \"kubernetes.io/projected/0283c619-105f-42db-82a2-24b5f0088c04-kube-api-access-wwhrb\") pod \"collect-profiles-29495535-glbz8\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.403347 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0283c619-105f-42db-82a2-24b5f0088c04-secret-volume\") pod \"collect-profiles-29495535-glbz8\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.403376 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0283c619-105f-42db-82a2-24b5f0088c04-config-volume\") pod \"collect-profiles-29495535-glbz8\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.404690 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/0283c619-105f-42db-82a2-24b5f0088c04-config-volume\") pod \"collect-profiles-29495535-glbz8\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.417737 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0283c619-105f-42db-82a2-24b5f0088c04-secret-volume\") pod \"collect-profiles-29495535-glbz8\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.431071 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwhrb\" (UniqueName: \"kubernetes.io/projected/0283c619-105f-42db-82a2-24b5f0088c04-kube-api-access-wwhrb\") pod \"collect-profiles-29495535-glbz8\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.527368 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.965098 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8"] Jan 30 00:15:00 crc kubenswrapper[5113]: I0130 00:15:00.979117 5113 provider.go:93] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 00:15:01 crc kubenswrapper[5113]: I0130 00:15:01.437132 5113 generic.go:358] "Generic (PLEG): container finished" podID="0283c619-105f-42db-82a2-24b5f0088c04" containerID="33f361cb0e56a4ec2cd5d37db7b75eeb834d06325718e13cc7b63a491b41da10" exitCode=0 Jan 30 00:15:01 crc kubenswrapper[5113]: I0130 00:15:01.437286 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" event={"ID":"0283c619-105f-42db-82a2-24b5f0088c04","Type":"ContainerDied","Data":"33f361cb0e56a4ec2cd5d37db7b75eeb834d06325718e13cc7b63a491b41da10"} Jan 30 00:15:01 crc kubenswrapper[5113]: I0130 00:15:01.437828 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" event={"ID":"0283c619-105f-42db-82a2-24b5f0088c04","Type":"ContainerStarted","Data":"f25dbd15cfa827d3a54d8e241dcfdfd2be0c71946571067f7c1b558a0c0a5821"} Jan 30 00:15:02 crc kubenswrapper[5113]: I0130 00:15:02.819663 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:02 crc kubenswrapper[5113]: I0130 00:15:02.944195 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwhrb\" (UniqueName: \"kubernetes.io/projected/0283c619-105f-42db-82a2-24b5f0088c04-kube-api-access-wwhrb\") pod \"0283c619-105f-42db-82a2-24b5f0088c04\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " Jan 30 00:15:02 crc kubenswrapper[5113]: I0130 00:15:02.944760 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0283c619-105f-42db-82a2-24b5f0088c04-config-volume\") pod \"0283c619-105f-42db-82a2-24b5f0088c04\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " Jan 30 00:15:02 crc kubenswrapper[5113]: I0130 00:15:02.944780 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0283c619-105f-42db-82a2-24b5f0088c04-secret-volume\") pod \"0283c619-105f-42db-82a2-24b5f0088c04\" (UID: \"0283c619-105f-42db-82a2-24b5f0088c04\") " Jan 30 00:15:02 crc kubenswrapper[5113]: I0130 00:15:02.945928 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0283c619-105f-42db-82a2-24b5f0088c04-config-volume" (OuterVolumeSpecName: "config-volume") pod "0283c619-105f-42db-82a2-24b5f0088c04" (UID: "0283c619-105f-42db-82a2-24b5f0088c04"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:02 crc kubenswrapper[5113]: I0130 00:15:02.952696 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0283c619-105f-42db-82a2-24b5f0088c04-kube-api-access-wwhrb" (OuterVolumeSpecName: "kube-api-access-wwhrb") pod "0283c619-105f-42db-82a2-24b5f0088c04" (UID: "0283c619-105f-42db-82a2-24b5f0088c04"). InnerVolumeSpecName "kube-api-access-wwhrb". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:02 crc kubenswrapper[5113]: I0130 00:15:02.953430 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0283c619-105f-42db-82a2-24b5f0088c04-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0283c619-105f-42db-82a2-24b5f0088c04" (UID: "0283c619-105f-42db-82a2-24b5f0088c04"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:15:03 crc kubenswrapper[5113]: I0130 00:15:03.046231 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-wwhrb\" (UniqueName: \"kubernetes.io/projected/0283c619-105f-42db-82a2-24b5f0088c04-kube-api-access-wwhrb\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:03 crc kubenswrapper[5113]: I0130 00:15:03.046317 5113 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0283c619-105f-42db-82a2-24b5f0088c04-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:03 crc kubenswrapper[5113]: I0130 00:15:03.046329 5113 reconciler_common.go:299] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0283c619-105f-42db-82a2-24b5f0088c04-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:03 crc kubenswrapper[5113]: I0130 00:15:03.456745 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" event={"ID":"0283c619-105f-42db-82a2-24b5f0088c04","Type":"ContainerDied","Data":"f25dbd15cfa827d3a54d8e241dcfdfd2be0c71946571067f7c1b558a0c0a5821"} Jan 30 00:15:03 crc kubenswrapper[5113]: I0130 00:15:03.456836 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f25dbd15cfa827d3a54d8e241dcfdfd2be0c71946571067f7c1b558a0c0a5821" Jan 30 00:15:03 crc kubenswrapper[5113]: I0130 00:15:03.456772 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495535-glbz8" Jan 30 00:15:06 crc kubenswrapper[5113]: I0130 00:15:06.772085 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc"] Jan 30 00:15:06 crc kubenswrapper[5113]: I0130 00:15:06.773308 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" podUID="d5bd7cbd-4a52-4e40-8d87-72db36f52f14" containerName="route-controller-manager" containerID="cri-o://644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e" gracePeriod=30 Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.217413 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.256678 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8"] Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.257570 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="0283c619-105f-42db-82a2-24b5f0088c04" containerName="collect-profiles" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.257593 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="0283c619-105f-42db-82a2-24b5f0088c04" containerName="collect-profiles" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.257806 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="d5bd7cbd-4a52-4e40-8d87-72db36f52f14" containerName="route-controller-manager" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.257820 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5bd7cbd-4a52-4e40-8d87-72db36f52f14" containerName="route-controller-manager" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.257949 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="d5bd7cbd-4a52-4e40-8d87-72db36f52f14" containerName="route-controller-manager" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.257968 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="0283c619-105f-42db-82a2-24b5f0088c04" containerName="collect-profiles" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.263514 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.265486 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8"] Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.315635 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-client-ca\") pod \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.315711 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-config\") pod \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.315827 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-tmp\") pod \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.315891 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-serving-cert\") pod \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.315939 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qmjk\" (UniqueName: 
\"kubernetes.io/projected/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-kube-api-access-2qmjk\") pod \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\" (UID: \"d5bd7cbd-4a52-4e40-8d87-72db36f52f14\") " Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.316126 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kw4r\" (UniqueName: \"kubernetes.io/projected/077bd088-c59a-4165-bf02-8a47a5a88b8f-kube-api-access-9kw4r\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.316168 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/077bd088-c59a-4165-bf02-8a47a5a88b8f-client-ca\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.316197 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/077bd088-c59a-4165-bf02-8a47a5a88b8f-serving-cert\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.316288 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/077bd088-c59a-4165-bf02-8a47a5a88b8f-tmp\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.316377 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/077bd088-c59a-4165-bf02-8a47a5a88b8f-config\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.316692 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-tmp" (OuterVolumeSpecName: "tmp") pod "d5bd7cbd-4a52-4e40-8d87-72db36f52f14" (UID: "d5bd7cbd-4a52-4e40-8d87-72db36f52f14"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.316904 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-client-ca" (OuterVolumeSpecName: "client-ca") pod "d5bd7cbd-4a52-4e40-8d87-72db36f52f14" (UID: "d5bd7cbd-4a52-4e40-8d87-72db36f52f14"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.317008 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-config" (OuterVolumeSpecName: "config") pod "d5bd7cbd-4a52-4e40-8d87-72db36f52f14" (UID: "d5bd7cbd-4a52-4e40-8d87-72db36f52f14"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.326935 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d5bd7cbd-4a52-4e40-8d87-72db36f52f14" (UID: "d5bd7cbd-4a52-4e40-8d87-72db36f52f14"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.326958 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-kube-api-access-2qmjk" (OuterVolumeSpecName: "kube-api-access-2qmjk") pod "d5bd7cbd-4a52-4e40-8d87-72db36f52f14" (UID: "d5bd7cbd-4a52-4e40-8d87-72db36f52f14"). InnerVolumeSpecName "kube-api-access-2qmjk". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.417511 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-9kw4r\" (UniqueName: \"kubernetes.io/projected/077bd088-c59a-4165-bf02-8a47a5a88b8f-kube-api-access-9kw4r\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.417684 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/077bd088-c59a-4165-bf02-8a47a5a88b8f-client-ca\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.417729 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/077bd088-c59a-4165-bf02-8a47a5a88b8f-serving-cert\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.418204 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/077bd088-c59a-4165-bf02-8a47a5a88b8f-tmp\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.418348 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/077bd088-c59a-4165-bf02-8a47a5a88b8f-config\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc 
kubenswrapper[5113]: I0130 00:15:07.418569 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.418589 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-2qmjk\" (UniqueName: \"kubernetes.io/projected/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-kube-api-access-2qmjk\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.418606 5113 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.418615 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.418624 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/d5bd7cbd-4a52-4e40-8d87-72db36f52f14-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.419158 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/077bd088-c59a-4165-bf02-8a47a5a88b8f-client-ca\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.419273 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/077bd088-c59a-4165-bf02-8a47a5a88b8f-tmp\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.419784 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/077bd088-c59a-4165-bf02-8a47a5a88b8f-config\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.424472 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/077bd088-c59a-4165-bf02-8a47a5a88b8f-serving-cert\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.437409 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kw4r\" (UniqueName: \"kubernetes.io/projected/077bd088-c59a-4165-bf02-8a47a5a88b8f-kube-api-access-9kw4r\") pod \"route-controller-manager-8647b66944-g87w8\" (UID: \"077bd088-c59a-4165-bf02-8a47a5a88b8f\") " pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.486596 5113 generic.go:358] "Generic (PLEG): container finished" 
podID="d5bd7cbd-4a52-4e40-8d87-72db36f52f14" containerID="644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e" exitCode=0 Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.486670 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" event={"ID":"d5bd7cbd-4a52-4e40-8d87-72db36f52f14","Type":"ContainerDied","Data":"644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e"} Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.486730 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.486752 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc" event={"ID":"d5bd7cbd-4a52-4e40-8d87-72db36f52f14","Type":"ContainerDied","Data":"3a232b016eb3994823da17d717253098cd0350e5248ca2abb861ab3c9cd0375a"} Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.486789 5113 scope.go:117] "RemoveContainer" containerID="644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.516056 5113 scope.go:117] "RemoveContainer" containerID="644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e" Jan 30 00:15:07 crc kubenswrapper[5113]: E0130 00:15:07.516916 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e\": container with ID starting with 644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e not found: ID does not exist" containerID="644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.516986 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e"} err="failed to get container status \"644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e\": rpc error: code = NotFound desc = could not find container \"644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e\": container with ID starting with 644d591aa00eba438d28bd05a407ddb2029e0c7838e613936c9332743260258e not found: ID does not exist" Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.529762 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc"] Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.535290 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5d697d675d-z4jdc"] Jan 30 00:15:07 crc kubenswrapper[5113]: I0130 00:15:07.584887 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:08 crc kubenswrapper[5113]: I0130 00:15:08.085403 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8"] Jan 30 00:15:08 crc kubenswrapper[5113]: I0130 00:15:08.497483 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" event={"ID":"077bd088-c59a-4165-bf02-8a47a5a88b8f","Type":"ContainerStarted","Data":"93ba793de0781fa65bdf76ff487893dde7bbf3da6a0fbfb1976e9303da09448f"} Jan 30 00:15:08 crc kubenswrapper[5113]: I0130 00:15:08.497580 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" event={"ID":"077bd088-c59a-4165-bf02-8a47a5a88b8f","Type":"ContainerStarted","Data":"322ccdc8b5833b8bab016cc037686b884a47829472349ff1ee093eb5ce56d4f7"} Jan 30 00:15:08 crc kubenswrapper[5113]: I0130 00:15:08.498025 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:08 crc kubenswrapper[5113]: I0130 00:15:08.521711 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" podStartSLOduration=2.521684267 podStartE2EDuration="2.521684267s" podCreationTimestamp="2026-01-30 00:15:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:15:08.518786257 +0000 UTC m=+328.591391674" watchObservedRunningTime="2026-01-30 00:15:08.521684267 +0000 UTC m=+328.594289684" Jan 30 00:15:08 crc kubenswrapper[5113]: I0130 00:15:08.786640 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5bd7cbd-4a52-4e40-8d87-72db36f52f14" path="/var/lib/kubelet/pods/d5bd7cbd-4a52-4e40-8d87-72db36f52f14/volumes" Jan 30 00:15:08 crc kubenswrapper[5113]: I0130 00:15:08.814287 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-8647b66944-g87w8" Jan 30 00:15:26 crc kubenswrapper[5113]: I0130 00:15:26.751097 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8697b7fb76-f52js"] Jan 30 00:15:26 crc kubenswrapper[5113]: I0130 00:15:26.752340 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" podUID="041afcdf-8351-4a75-b316-bac199ef8240" containerName="controller-manager" containerID="cri-o://9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c" gracePeriod=30 Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.199080 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.235269 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99"] Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.235987 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="041afcdf-8351-4a75-b316-bac199ef8240" containerName="controller-manager" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.236011 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="041afcdf-8351-4a75-b316-bac199ef8240" containerName="controller-manager" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.236116 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="041afcdf-8351-4a75-b316-bac199ef8240" containerName="controller-manager" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.240097 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.264748 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99"] Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.309877 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/041afcdf-8351-4a75-b316-bac199ef8240-tmp\") pod \"041afcdf-8351-4a75-b316-bac199ef8240\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.310125 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-config\") pod \"041afcdf-8351-4a75-b316-bac199ef8240\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.310206 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-client-ca\") pod \"041afcdf-8351-4a75-b316-bac199ef8240\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.310229 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/041afcdf-8351-4a75-b316-bac199ef8240-serving-cert\") pod \"041afcdf-8351-4a75-b316-bac199ef8240\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.310308 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-proxy-ca-bundles\") pod \"041afcdf-8351-4a75-b316-bac199ef8240\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.310354 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqkwx\" (UniqueName: \"kubernetes.io/projected/041afcdf-8351-4a75-b316-bac199ef8240-kube-api-access-bqkwx\") pod \"041afcdf-8351-4a75-b316-bac199ef8240\" (UID: \"041afcdf-8351-4a75-b316-bac199ef8240\") " Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.310744 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/041afcdf-8351-4a75-b316-bac199ef8240-tmp" (OuterVolumeSpecName: "tmp") pod "041afcdf-8351-4a75-b316-bac199ef8240" (UID: "041afcdf-8351-4a75-b316-bac199ef8240"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.311105 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "041afcdf-8351-4a75-b316-bac199ef8240" (UID: "041afcdf-8351-4a75-b316-bac199ef8240"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.311373 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-config" (OuterVolumeSpecName: "config") pod "041afcdf-8351-4a75-b316-bac199ef8240" (UID: "041afcdf-8351-4a75-b316-bac199ef8240"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.312151 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-client-ca" (OuterVolumeSpecName: "client-ca") pod "041afcdf-8351-4a75-b316-bac199ef8240" (UID: "041afcdf-8351-4a75-b316-bac199ef8240"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.316671 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/041afcdf-8351-4a75-b316-bac199ef8240-kube-api-access-bqkwx" (OuterVolumeSpecName: "kube-api-access-bqkwx") pod "041afcdf-8351-4a75-b316-bac199ef8240" (UID: "041afcdf-8351-4a75-b316-bac199ef8240"). InnerVolumeSpecName "kube-api-access-bqkwx". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.317145 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/041afcdf-8351-4a75-b316-bac199ef8240-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "041afcdf-8351-4a75-b316-bac199ef8240" (UID: "041afcdf-8351-4a75-b316-bac199ef8240"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.411985 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-config\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.412193 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-serving-cert\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.412218 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vr56p\" (UniqueName: \"kubernetes.io/projected/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-kube-api-access-vr56p\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.412240 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-tmp\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.412286 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-proxy-ca-bundles\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.412307 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-client-ca\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.412351 5113 reconciler_common.go:299] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.412361 5113 reconciler_common.go:299] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.412370 5113 reconciler_common.go:299] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/041afcdf-8351-4a75-b316-bac199ef8240-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.412380 5113 reconciler_common.go:299] "Volume detached for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/041afcdf-8351-4a75-b316-bac199ef8240-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.412390 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-bqkwx\" (UniqueName: \"kubernetes.io/projected/041afcdf-8351-4a75-b316-bac199ef8240-kube-api-access-bqkwx\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.412400 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/041afcdf-8351-4a75-b316-bac199ef8240-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.514226 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-proxy-ca-bundles\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.514319 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-client-ca\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.514390 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-config\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.514458 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-serving-cert\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.514480 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-vr56p\" (UniqueName: \"kubernetes.io/projected/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-kube-api-access-vr56p\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.514515 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-tmp\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.515161 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-tmp\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " 
pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.516181 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-client-ca\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.516297 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-config\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.516293 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-proxy-ca-bundles\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.520160 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-serving-cert\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.533031 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-vr56p\" (UniqueName: \"kubernetes.io/projected/443777cc-40d2-4ec3-8ac3-0b3b6e18f634-kube-api-access-vr56p\") pod \"controller-manager-5fbc6c9c4d-k6n99\" (UID: \"443777cc-40d2-4ec3-8ac3-0b3b6e18f634\") " pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.571657 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.626013 5113 generic.go:358] "Generic (PLEG): container finished" podID="041afcdf-8351-4a75-b316-bac199ef8240" containerID="9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c" exitCode=0 Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.626149 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" event={"ID":"041afcdf-8351-4a75-b316-bac199ef8240","Type":"ContainerDied","Data":"9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c"} Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.626209 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.626250 5113 scope.go:117] "RemoveContainer" containerID="9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.626230 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8697b7fb76-f52js" event={"ID":"041afcdf-8351-4a75-b316-bac199ef8240","Type":"ContainerDied","Data":"52f329e5d5c1feaa57908165b5d0a341fa0d658704c28fc1755d47f4a2931de3"} Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.652390 5113 scope.go:117] "RemoveContainer" containerID="9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c" Jan 30 00:15:27 crc kubenswrapper[5113]: E0130 00:15:27.653029 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c\": container with ID starting with 9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c not found: ID does not exist" containerID="9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.653269 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c"} err="failed to get container status \"9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c\": rpc error: code = NotFound desc = could not find container \"9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c\": container with ID starting with 9d0e53c3385536563877bb6cce175765169513099c29159aa89cd32305eb3b0c not found: ID does not exist" Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.676346 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8697b7fb76-f52js"] Jan 30 00:15:27 crc kubenswrapper[5113]: I0130 00:15:27.679598 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-8697b7fb76-f52js"] Jan 30 00:15:28 crc kubenswrapper[5113]: W0130 00:15:28.093493 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod443777cc_40d2_4ec3_8ac3_0b3b6e18f634.slice/crio-d95c3f15e728a960c18e75a272c53c40e2fbb96b0801768b96885111e57a9f03 WatchSource:0}: Error finding container d95c3f15e728a960c18e75a272c53c40e2fbb96b0801768b96885111e57a9f03: Status 404 returned error can't find the container with id d95c3f15e728a960c18e75a272c53c40e2fbb96b0801768b96885111e57a9f03 Jan 30 00:15:28 crc kubenswrapper[5113]: I0130 00:15:28.094555 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99"] Jan 30 00:15:28 crc kubenswrapper[5113]: I0130 00:15:28.636560 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" event={"ID":"443777cc-40d2-4ec3-8ac3-0b3b6e18f634","Type":"ContainerStarted","Data":"20028202bdfe515fc9a91e9cfb74a4c7538264af8868714542e17cdfe1659a84"} Jan 30 00:15:28 crc kubenswrapper[5113]: I0130 00:15:28.637016 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:28 crc kubenswrapper[5113]: 
I0130 00:15:28.637031 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" event={"ID":"443777cc-40d2-4ec3-8ac3-0b3b6e18f634","Type":"ContainerStarted","Data":"d95c3f15e728a960c18e75a272c53c40e2fbb96b0801768b96885111e57a9f03"} Jan 30 00:15:28 crc kubenswrapper[5113]: I0130 00:15:28.659475 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" podStartSLOduration=2.659449436 podStartE2EDuration="2.659449436s" podCreationTimestamp="2026-01-30 00:15:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:15:28.659389004 +0000 UTC m=+348.731994381" watchObservedRunningTime="2026-01-30 00:15:28.659449436 +0000 UTC m=+348.732054823" Jan 30 00:15:28 crc kubenswrapper[5113]: I0130 00:15:28.780928 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="041afcdf-8351-4a75-b316-bac199ef8240" path="/var/lib/kubelet/pods/041afcdf-8351-4a75-b316-bac199ef8240/volumes" Jan 30 00:15:29 crc kubenswrapper[5113]: I0130 00:15:29.169673 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5fbc6c9c4d-k6n99" Jan 30 00:15:33 crc kubenswrapper[5113]: I0130 00:15:33.313764 5113 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.704711 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-znr9f"] Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.706148 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-znr9f" podUID="2d28df1d-4619-45dc-8fee-b482cfad0ead" containerName="registry-server" containerID="cri-o://2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4" gracePeriod=30 Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.728631 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vjvch"] Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.729897 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vjvch" podUID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" containerName="registry-server" containerID="cri-o://d117539f67b22b07be1f2b4407532487e5ad8c9ad46acc6b7d53dd8e33267b3b" gracePeriod=30 Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.754902 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-qljgk"] Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.755459 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" podUID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" containerName="marketplace-operator" containerID="cri-o://eb82d61e9d30b32d41beb3437db5fc66e5f4749f7aa3e2448a0f58cd81714364" gracePeriod=30 Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.775393 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gv2cj"] Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.776201 5113 kuberuntime_container.go:858] "Killing container with a grace period" 
pod="openshift-marketplace/redhat-marketplace-gv2cj" podUID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerName="registry-server" containerID="cri-o://dd7b1883507c1da7da97c2a27d6549d626755ed1da21a9abdf3daf74ba3c5d86" gracePeriod=30 Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.788401 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tvfsp"] Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.788846 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tvfsp" podUID="84d95933-fc27-469e-91be-cb781299ccd2" containerName="registry-server" containerID="cri-o://73ea884e064974e952bf9033ecb3e45f1395565f2db1ec0617c3e70ef8bcbba1" gracePeriod=30 Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.795965 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-6nt5s"] Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.800834 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.802995 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-6nt5s"] Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.972680 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/9da74617-052a-44f4-a53c-b8e8ca99f9da-tmp\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.972754 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9da74617-052a-44f4-a53c-b8e8ca99f9da-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.972792 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9da74617-052a-44f4-a53c-b8e8ca99f9da-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:55 crc kubenswrapper[5113]: I0130 00:15:55.972811 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjlgq\" (UniqueName: \"kubernetes.io/projected/9da74617-052a-44f4-a53c-b8e8ca99f9da-kube-api-access-xjlgq\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.529887 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9da74617-052a-44f4-a53c-b8e8ca99f9da-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " 
pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.529957 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9da74617-052a-44f4-a53c-b8e8ca99f9da-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.529978 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-xjlgq\" (UniqueName: \"kubernetes.io/projected/9da74617-052a-44f4-a53c-b8e8ca99f9da-kube-api-access-xjlgq\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.530034 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/9da74617-052a-44f4-a53c-b8e8ca99f9da-tmp\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.530880 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/9da74617-052a-44f4-a53c-b8e8ca99f9da-tmp\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.533451 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9da74617-052a-44f4-a53c-b8e8ca99f9da-marketplace-trusted-ca\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.542157 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/9da74617-052a-44f4-a53c-b8e8ca99f9da-marketplace-operator-metrics\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.554716 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjlgq\" (UniqueName: \"kubernetes.io/projected/9da74617-052a-44f4-a53c-b8e8ca99f9da-kube-api-access-xjlgq\") pod \"marketplace-operator-547dbd544d-6nt5s\" (UID: \"9da74617-052a-44f4-a53c-b8e8ca99f9da\") " pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.636699 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.738151 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-znr9f" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.845729 5113 generic.go:358] "Generic (PLEG): container finished" podID="2d28df1d-4619-45dc-8fee-b482cfad0ead" containerID="2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4" exitCode=0 Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.845850 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-znr9f" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.845873 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-znr9f" event={"ID":"2d28df1d-4619-45dc-8fee-b482cfad0ead","Type":"ContainerDied","Data":"2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4"} Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.845916 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-znr9f" event={"ID":"2d28df1d-4619-45dc-8fee-b482cfad0ead","Type":"ContainerDied","Data":"40be437a7dc5c9f625e23929fb2ebfb7caf3abaa47c733873e62abe6e549f488"} Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.845940 5113 scope.go:117] "RemoveContainer" containerID="2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.852222 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hxgh\" (UniqueName: \"kubernetes.io/projected/2d28df1d-4619-45dc-8fee-b482cfad0ead-kube-api-access-7hxgh\") pod \"2d28df1d-4619-45dc-8fee-b482cfad0ead\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.852341 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-utilities\") pod \"2d28df1d-4619-45dc-8fee-b482cfad0ead\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.852494 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-catalog-content\") pod \"2d28df1d-4619-45dc-8fee-b482cfad0ead\" (UID: \"2d28df1d-4619-45dc-8fee-b482cfad0ead\") " Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.859598 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-utilities" (OuterVolumeSpecName: "utilities") pod "2d28df1d-4619-45dc-8fee-b482cfad0ead" (UID: "2d28df1d-4619-45dc-8fee-b482cfad0ead"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.863734 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d28df1d-4619-45dc-8fee-b482cfad0ead-kube-api-access-7hxgh" (OuterVolumeSpecName: "kube-api-access-7hxgh") pod "2d28df1d-4619-45dc-8fee-b482cfad0ead" (UID: "2d28df1d-4619-45dc-8fee-b482cfad0ead"). InnerVolumeSpecName "kube-api-access-7hxgh". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.867178 5113 generic.go:358] "Generic (PLEG): container finished" podID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerID="dd7b1883507c1da7da97c2a27d6549d626755ed1da21a9abdf3daf74ba3c5d86" exitCode=0 Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.867339 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gv2cj" event={"ID":"a527979f-66df-45e5-a643-2c2ebc9fc7c6","Type":"ContainerDied","Data":"dd7b1883507c1da7da97c2a27d6549d626755ed1da21a9abdf3daf74ba3c5d86"} Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.880048 5113 generic.go:358] "Generic (PLEG): container finished" podID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" containerID="d117539f67b22b07be1f2b4407532487e5ad8c9ad46acc6b7d53dd8e33267b3b" exitCode=0 Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.880128 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vjvch" event={"ID":"5d9cf73a-1c55-49b9-9664-393fd1ea11ec","Type":"ContainerDied","Data":"d117539f67b22b07be1f2b4407532487e5ad8c9ad46acc6b7d53dd8e33267b3b"} Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.883176 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.896307 5113 generic.go:358] "Generic (PLEG): container finished" podID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" containerID="eb82d61e9d30b32d41beb3437db5fc66e5f4749f7aa3e2448a0f58cd81714364" exitCode=0 Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.896450 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" event={"ID":"f7e91b03-5282-4f6e-8ed2-a44afa3fc350","Type":"ContainerDied","Data":"eb82d61e9d30b32d41beb3437db5fc66e5f4749f7aa3e2448a0f58cd81714364"} Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.908225 5113 generic.go:358] "Generic (PLEG): container finished" podID="84d95933-fc27-469e-91be-cb781299ccd2" containerID="73ea884e064974e952bf9033ecb3e45f1395565f2db1ec0617c3e70ef8bcbba1" exitCode=0 Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.908270 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvfsp" event={"ID":"84d95933-fc27-469e-91be-cb781299ccd2","Type":"ContainerDied","Data":"73ea884e064974e952bf9033ecb3e45f1395565f2db1ec0617c3e70ef8bcbba1"} Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.911690 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2d28df1d-4619-45dc-8fee-b482cfad0ead" (UID: "2d28df1d-4619-45dc-8fee-b482cfad0ead"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.921821 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.933433 5113 scope.go:117] "RemoveContainer" containerID="74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.955090 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-trusted-ca\") pod \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.955186 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-tmp\") pod \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.955266 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6fwsm\" (UniqueName: \"kubernetes.io/projected/a527979f-66df-45e5-a643-2c2ebc9fc7c6-kube-api-access-6fwsm\") pod \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.955319 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-operator-metrics\") pod \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.955366 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-utilities\") pod \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.955414 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-catalog-content\") pod \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\" (UID: \"a527979f-66df-45e5-a643-2c2ebc9fc7c6\") " Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.955463 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cj4s\" (UniqueName: \"kubernetes.io/projected/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-kube-api-access-8cj4s\") pod \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\" (UID: \"f7e91b03-5282-4f6e-8ed2-a44afa3fc350\") " Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.955770 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-tmp" (OuterVolumeSpecName: "tmp") pod "f7e91b03-5282-4f6e-8ed2-a44afa3fc350" (UID: "f7e91b03-5282-4f6e-8ed2-a44afa3fc350"). InnerVolumeSpecName "tmp". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.956862 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "f7e91b03-5282-4f6e-8ed2-a44afa3fc350" (UID: "f7e91b03-5282-4f6e-8ed2-a44afa3fc350"). 
InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.956999 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-utilities" (OuterVolumeSpecName: "utilities") pod "a527979f-66df-45e5-a643-2c2ebc9fc7c6" (UID: "a527979f-66df-45e5-a643-2c2ebc9fc7c6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.957177 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.957190 5113 reconciler_common.go:299] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.957200 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d28df1d-4619-45dc-8fee-b482cfad0ead-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.957230 5113 reconciler_common.go:299] "Volume detached for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-tmp\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.957241 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.957251 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-7hxgh\" (UniqueName: \"kubernetes.io/projected/2d28df1d-4619-45dc-8fee-b482cfad0ead-kube-api-access-7hxgh\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.961320 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "f7e91b03-5282-4f6e-8ed2-a44afa3fc350" (UID: "f7e91b03-5282-4f6e-8ed2-a44afa3fc350"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.961937 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-kube-api-access-8cj4s" (OuterVolumeSpecName: "kube-api-access-8cj4s") pod "f7e91b03-5282-4f6e-8ed2-a44afa3fc350" (UID: "f7e91b03-5282-4f6e-8ed2-a44afa3fc350"). InnerVolumeSpecName "kube-api-access-8cj4s". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.961980 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a527979f-66df-45e5-a643-2c2ebc9fc7c6-kube-api-access-6fwsm" (OuterVolumeSpecName: "kube-api-access-6fwsm") pod "a527979f-66df-45e5-a643-2c2ebc9fc7c6" (UID: "a527979f-66df-45e5-a643-2c2ebc9fc7c6"). InnerVolumeSpecName "kube-api-access-6fwsm". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:56 crc kubenswrapper[5113]: I0130 00:15:56.975715 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a527979f-66df-45e5-a643-2c2ebc9fc7c6" (UID: "a527979f-66df-45e5-a643-2c2ebc9fc7c6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.002809 5113 scope.go:117] "RemoveContainer" containerID="4166c8f4a2a8e784d449476671b8081adec9edad3e6efa3d3c03ec0e7552e4cc" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.014036 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vjvch" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.017845 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.023021 5113 scope.go:117] "RemoveContainer" containerID="2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4" Jan 30 00:15:57 crc kubenswrapper[5113]: E0130 00:15:57.023610 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4\": container with ID starting with 2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4 not found: ID does not exist" containerID="2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.023664 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4"} err="failed to get container status \"2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4\": rpc error: code = NotFound desc = could not find container \"2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4\": container with ID starting with 2396ab5eca03cea7f1c690d25238b8ee973aa1c7063801721ad583b4e980d0a4 not found: ID does not exist" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.023694 5113 scope.go:117] "RemoveContainer" containerID="74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d" Jan 30 00:15:57 crc kubenswrapper[5113]: E0130 00:15:57.024080 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d\": container with ID starting with 74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d not found: ID does not exist" containerID="74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.024130 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d"} err="failed to get container status \"74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d\": rpc error: code = NotFound desc = could not find container \"74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d\": container with ID starting with 74e27cd321138db721772adff9db44767f09ca94a735d9afac4a8efb1f46331d not found: ID does not exist" Jan 
30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.024156 5113 scope.go:117] "RemoveContainer" containerID="4166c8f4a2a8e784d449476671b8081adec9edad3e6efa3d3c03ec0e7552e4cc" Jan 30 00:15:57 crc kubenswrapper[5113]: E0130 00:15:57.024690 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4166c8f4a2a8e784d449476671b8081adec9edad3e6efa3d3c03ec0e7552e4cc\": container with ID starting with 4166c8f4a2a8e784d449476671b8081adec9edad3e6efa3d3c03ec0e7552e4cc not found: ID does not exist" containerID="4166c8f4a2a8e784d449476671b8081adec9edad3e6efa3d3c03ec0e7552e4cc" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.024713 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4166c8f4a2a8e784d449476671b8081adec9edad3e6efa3d3c03ec0e7552e4cc"} err="failed to get container status \"4166c8f4a2a8e784d449476671b8081adec9edad3e6efa3d3c03ec0e7552e4cc\": rpc error: code = NotFound desc = could not find container \"4166c8f4a2a8e784d449476671b8081adec9edad3e6efa3d3c03ec0e7552e4cc\": container with ID starting with 4166c8f4a2a8e784d449476671b8081adec9edad3e6efa3d3c03ec0e7552e4cc not found: ID does not exist" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.024725 5113 scope.go:117] "RemoveContainer" containerID="eb82d61e9d30b32d41beb3437db5fc66e5f4749f7aa3e2448a0f58cd81714364" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.062008 5113 scope.go:117] "RemoveContainer" containerID="567bebbbe1416f08b7f812e6bca9ea03e75327b2ea279303897af85a298d51bb" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.062604 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-catalog-content\") pod \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.062703 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-utilities\") pod \"84d95933-fc27-469e-91be-cb781299ccd2\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.062859 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxcq8\" (UniqueName: \"kubernetes.io/projected/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-kube-api-access-sxcq8\") pod \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.062889 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-utilities\") pod \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\" (UID: \"5d9cf73a-1c55-49b9-9664-393fd1ea11ec\") " Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.062950 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-catalog-content\") pod \"84d95933-fc27-469e-91be-cb781299ccd2\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.063003 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tpq7\" (UniqueName: 
\"kubernetes.io/projected/84d95933-fc27-469e-91be-cb781299ccd2-kube-api-access-4tpq7\") pod \"84d95933-fc27-469e-91be-cb781299ccd2\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.064166 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-utilities" (OuterVolumeSpecName: "utilities") pod "5d9cf73a-1c55-49b9-9664-393fd1ea11ec" (UID: "5d9cf73a-1c55-49b9-9664-393fd1ea11ec"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.064767 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-utilities" (OuterVolumeSpecName: "utilities") pod "84d95933-fc27-469e-91be-cb781299ccd2" (UID: "84d95933-fc27-469e-91be-cb781299ccd2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.067475 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-kube-api-access-sxcq8" (OuterVolumeSpecName: "kube-api-access-sxcq8") pod "5d9cf73a-1c55-49b9-9664-393fd1ea11ec" (UID: "5d9cf73a-1c55-49b9-9664-393fd1ea11ec"). InnerVolumeSpecName "kube-api-access-sxcq8". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.074657 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84d95933-fc27-469e-91be-cb781299ccd2-kube-api-access-4tpq7" (OuterVolumeSpecName: "kube-api-access-4tpq7") pod "84d95933-fc27-469e-91be-cb781299ccd2" (UID: "84d95933-fc27-469e-91be-cb781299ccd2"). InnerVolumeSpecName "kube-api-access-4tpq7". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.083617 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-4tpq7\" (UniqueName: \"kubernetes.io/projected/84d95933-fc27-469e-91be-cb781299ccd2-kube-api-access-4tpq7\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.083645 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.083656 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-6fwsm\" (UniqueName: \"kubernetes.io/projected/a527979f-66df-45e5-a643-2c2ebc9fc7c6-kube-api-access-6fwsm\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.083666 5113 reconciler_common.go:299] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.083678 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-sxcq8\" (UniqueName: \"kubernetes.io/projected/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-kube-api-access-sxcq8\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.083687 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.083695 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a527979f-66df-45e5-a643-2c2ebc9fc7c6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.083704 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8cj4s\" (UniqueName: \"kubernetes.io/projected/f7e91b03-5282-4f6e-8ed2-a44afa3fc350-kube-api-access-8cj4s\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.122947 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5d9cf73a-1c55-49b9-9664-393fd1ea11ec" (UID: "5d9cf73a-1c55-49b9-9664-393fd1ea11ec"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.178925 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-6nt5s"] Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.184083 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "84d95933-fc27-469e-91be-cb781299ccd2" (UID: "84d95933-fc27-469e-91be-cb781299ccd2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.184623 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-znr9f"] Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.184862 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-catalog-content\") pod \"84d95933-fc27-469e-91be-cb781299ccd2\" (UID: \"84d95933-fc27-469e-91be-cb781299ccd2\") " Jan 30 00:15:57 crc kubenswrapper[5113]: W0130 00:15:57.185153 5113 empty_dir.go:511] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/84d95933-fc27-469e-91be-cb781299ccd2/volumes/kubernetes.io~empty-dir/catalog-content Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.185186 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "84d95933-fc27-469e-91be-cb781299ccd2" (UID: "84d95933-fc27-469e-91be-cb781299ccd2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.185583 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84d95933-fc27-469e-91be-cb781299ccd2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.185612 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5d9cf73a-1c55-49b9-9664-393fd1ea11ec-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.188121 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-znr9f"] Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.916334 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" event={"ID":"f7e91b03-5282-4f6e-8ed2-a44afa3fc350","Type":"ContainerDied","Data":"0a28b1aeae4e9c87f2f10a4a8902ce0f4eecdee7012990904b8c335713f17280"} Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.916496 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-547dbd544d-qljgk" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.919387 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tvfsp" event={"ID":"84d95933-fc27-469e-91be-cb781299ccd2","Type":"ContainerDied","Data":"d7e6383266e4008416803bcf65d982846013bf77401749f629fba69de42ec7b1"} Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.919462 5113 scope.go:117] "RemoveContainer" containerID="73ea884e064974e952bf9033ecb3e45f1395565f2db1ec0617c3e70ef8bcbba1" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.919657 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tvfsp" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.929928 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gv2cj" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.930073 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gv2cj" event={"ID":"a527979f-66df-45e5-a643-2c2ebc9fc7c6","Type":"ContainerDied","Data":"27f1936deae4b5d9a448791023c46364d6b866267f52a1eb79b78fccdd7c65ac"} Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.935152 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vjvch" event={"ID":"5d9cf73a-1c55-49b9-9664-393fd1ea11ec","Type":"ContainerDied","Data":"eb6d91bfd39147575a59dc2b1f4aa4669d2efc7fccb782668ce066e65af9c905"} Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.935364 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vjvch" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.937212 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" event={"ID":"9da74617-052a-44f4-a53c-b8e8ca99f9da","Type":"ContainerStarted","Data":"f24f06172a067733b51c185f1061a9aebe8d5cd3e02d6df878e9ed621662f0e9"} Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.937250 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" event={"ID":"9da74617-052a-44f4-a53c-b8e8ca99f9da","Type":"ContainerStarted","Data":"b09cdf4179b57fc55ef07ec1a48256d29a694df059d1d68e30a89a63a4a59085"} Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.937431 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.943724 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.949050 5113 scope.go:117] "RemoveContainer" containerID="0fe8f043b7da890fabd2f0313e23cd5e3dacf910e0aa04e141c9b4e15c045b0d" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.967080 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-547dbd544d-6nt5s" podStartSLOduration=2.967049355 podStartE2EDuration="2.967049355s" podCreationTimestamp="2026-01-30 00:15:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:15:57.959129 +0000 UTC m=+378.031734397" watchObservedRunningTime="2026-01-30 00:15:57.967049355 +0000 UTC m=+378.039654742" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.992546 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tvfsp"] Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.995045 5113 scope.go:117] "RemoveContainer" containerID="c655e0adbdb502a98eee43fda17401c76f449dfd25e615dd826a6c96e4b83084" Jan 30 00:15:57 crc kubenswrapper[5113]: I0130 00:15:57.996562 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tvfsp"] Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.019609 5113 scope.go:117] "RemoveContainer" containerID="dd7b1883507c1da7da97c2a27d6549d626755ed1da21a9abdf3daf74ba3c5d86" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.042436 5113 scope.go:117] 
"RemoveContainer" containerID="de0a54b6b9f26cb97b7f475dcb22083112421174123bb8f78562d372be77cb31" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.042716 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-qljgk"] Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.047016 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-547dbd544d-qljgk"] Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.053059 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vjvch"] Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.060665 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vjvch"] Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.066620 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gv2cj"] Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.071376 5113 scope.go:117] "RemoveContainer" containerID="354f34ce988303e83d4b70d0ac14bc66aae6dd5ab0d5aa23df3fb842c6c55cd0" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.087789 5113 scope.go:117] "RemoveContainer" containerID="d117539f67b22b07be1f2b4407532487e5ad8c9ad46acc6b7d53dd8e33267b3b" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.088679 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gv2cj"] Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.107462 5113 scope.go:117] "RemoveContainer" containerID="03a83c31fd5983a828dd0510903bb4b35996bc449640d20b1881e369ff5553f8" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.137505 5113 scope.go:117] "RemoveContainer" containerID="46fee9fbba0f7282fcae39a5f0c40f093eec48b1d3c8a960ad85f4a6ccf589ab" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.528767 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-zw2z8"] Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531084 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="2d28df1d-4619-45dc-8fee-b482cfad0ead" containerName="extract-content" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531118 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d28df1d-4619-45dc-8fee-b482cfad0ead" containerName="extract-content" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531147 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="2d28df1d-4619-45dc-8fee-b482cfad0ead" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531157 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d28df1d-4619-45dc-8fee-b482cfad0ead" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531170 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" containerName="extract-content" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531179 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" containerName="extract-content" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531204 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" containerName="extract-utilities" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 
00:15:58.531212 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" containerName="extract-utilities" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531233 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="2d28df1d-4619-45dc-8fee-b482cfad0ead" containerName="extract-utilities" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531243 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d28df1d-4619-45dc-8fee-b482cfad0ead" containerName="extract-utilities" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531255 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531263 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531275 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="84d95933-fc27-469e-91be-cb781299ccd2" containerName="extract-utilities" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531283 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d95933-fc27-469e-91be-cb781299ccd2" containerName="extract-utilities" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531702 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="84d95933-fc27-469e-91be-cb781299ccd2" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531727 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d95933-fc27-469e-91be-cb781299ccd2" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531746 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" containerName="marketplace-operator" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531759 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" containerName="marketplace-operator" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.531774 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532089 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532106 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" containerName="marketplace-operator" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532115 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" containerName="marketplace-operator" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532150 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="84d95933-fc27-469e-91be-cb781299ccd2" containerName="extract-content" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532160 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="84d95933-fc27-469e-91be-cb781299ccd2" containerName="extract-content" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532177 5113 cpu_manager.go:401] 
"RemoveStaleState: containerMap: removing container" podUID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerName="extract-utilities" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532187 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerName="extract-utilities" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532197 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerName="extract-content" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532205 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerName="extract-content" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532355 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532376 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" containerName="marketplace-operator" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532394 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="84d95933-fc27-469e-91be-cb781299ccd2" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532409 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532422 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="2d28df1d-4619-45dc-8fee-b482cfad0ead" containerName="registry-server" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.532818 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" containerName="marketplace-operator" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.547084 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zw2z8"] Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.547410 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.553451 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-marketplace-dockercfg-gg4w7\"" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.630972 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-utilities\") pod \"redhat-marketplace-zw2z8\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") " pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.631377 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-catalog-content\") pod \"redhat-marketplace-zw2z8\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") " pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.631465 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x85hv\" (UniqueName: \"kubernetes.io/projected/909f168f-fd5f-4b75-909e-514deaea8397-kube-api-access-x85hv\") pod \"redhat-marketplace-zw2z8\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") " pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.733142 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-catalog-content\") pod \"redhat-marketplace-zw2z8\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") " pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.733647 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-x85hv\" (UniqueName: \"kubernetes.io/projected/909f168f-fd5f-4b75-909e-514deaea8397-kube-api-access-x85hv\") pod \"redhat-marketplace-zw2z8\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") " pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.733746 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-utilities\") pod \"redhat-marketplace-zw2z8\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") " pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.733839 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-catalog-content\") pod \"redhat-marketplace-zw2z8\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") " pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.734379 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-utilities\") pod \"redhat-marketplace-zw2z8\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") " pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.757773 5113 
operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-x85hv\" (UniqueName: \"kubernetes.io/projected/909f168f-fd5f-4b75-909e-514deaea8397-kube-api-access-x85hv\") pod \"redhat-marketplace-zw2z8\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") " pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.785768 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d28df1d-4619-45dc-8fee-b482cfad0ead" path="/var/lib/kubelet/pods/2d28df1d-4619-45dc-8fee-b482cfad0ead/volumes" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.786401 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d9cf73a-1c55-49b9-9664-393fd1ea11ec" path="/var/lib/kubelet/pods/5d9cf73a-1c55-49b9-9664-393fd1ea11ec/volumes" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.787461 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84d95933-fc27-469e-91be-cb781299ccd2" path="/var/lib/kubelet/pods/84d95933-fc27-469e-91be-cb781299ccd2/volumes" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.788694 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a527979f-66df-45e5-a643-2c2ebc9fc7c6" path="/var/lib/kubelet/pods/a527979f-66df-45e5-a643-2c2ebc9fc7c6/volumes" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.789351 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7e91b03-5282-4f6e-8ed2-a44afa3fc350" path="/var/lib/kubelet/pods/f7e91b03-5282-4f6e-8ed2-a44afa3fc350/volumes" Jan 30 00:15:58 crc kubenswrapper[5113]: I0130 00:15:58.864930 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.258557 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-zw2z8"] Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.525109 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zvvkv"] Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.535996 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.539188 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zvvkv"] Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.543802 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"redhat-operators-dockercfg-9gxlh\"" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.648942 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95aea721-4924-4c1a-9a22-8e078dbd1a05-catalog-content\") pod \"redhat-operators-zvvkv\" (UID: \"95aea721-4924-4c1a-9a22-8e078dbd1a05\") " pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.649025 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95aea721-4924-4c1a-9a22-8e078dbd1a05-utilities\") pod \"redhat-operators-zvvkv\" (UID: \"95aea721-4924-4c1a-9a22-8e078dbd1a05\") " pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.649105 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gj8dj\" (UniqueName: \"kubernetes.io/projected/95aea721-4924-4c1a-9a22-8e078dbd1a05-kube-api-access-gj8dj\") pod \"redhat-operators-zvvkv\" (UID: \"95aea721-4924-4c1a-9a22-8e078dbd1a05\") " pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.750577 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95aea721-4924-4c1a-9a22-8e078dbd1a05-catalog-content\") pod \"redhat-operators-zvvkv\" (UID: \"95aea721-4924-4c1a-9a22-8e078dbd1a05\") " pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.750672 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95aea721-4924-4c1a-9a22-8e078dbd1a05-utilities\") pod \"redhat-operators-zvvkv\" (UID: \"95aea721-4924-4c1a-9a22-8e078dbd1a05\") " pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.750734 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-gj8dj\" (UniqueName: \"kubernetes.io/projected/95aea721-4924-4c1a-9a22-8e078dbd1a05-kube-api-access-gj8dj\") pod \"redhat-operators-zvvkv\" (UID: \"95aea721-4924-4c1a-9a22-8e078dbd1a05\") " pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.751263 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/95aea721-4924-4c1a-9a22-8e078dbd1a05-catalog-content\") pod \"redhat-operators-zvvkv\" (UID: \"95aea721-4924-4c1a-9a22-8e078dbd1a05\") " pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.751504 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/95aea721-4924-4c1a-9a22-8e078dbd1a05-utilities\") pod \"redhat-operators-zvvkv\" (UID: \"95aea721-4924-4c1a-9a22-8e078dbd1a05\") 
" pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.772198 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-gj8dj\" (UniqueName: \"kubernetes.io/projected/95aea721-4924-4c1a-9a22-8e078dbd1a05-kube-api-access-gj8dj\") pod \"redhat-operators-zvvkv\" (UID: \"95aea721-4924-4c1a-9a22-8e078dbd1a05\") " pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.867183 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.965381 5113 generic.go:358] "Generic (PLEG): container finished" podID="909f168f-fd5f-4b75-909e-514deaea8397" containerID="2444e876087b90e83211dfbe36dfd8459e698b792af3b6e0698d71e740b5560b" exitCode=0 Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.965501 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zw2z8" event={"ID":"909f168f-fd5f-4b75-909e-514deaea8397","Type":"ContainerDied","Data":"2444e876087b90e83211dfbe36dfd8459e698b792af3b6e0698d71e740b5560b"} Jan 30 00:15:59 crc kubenswrapper[5113]: I0130 00:15:59.966042 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zw2z8" event={"ID":"909f168f-fd5f-4b75-909e-514deaea8397","Type":"ContainerStarted","Data":"ffe622ccfa50ed7336982623e30860d3a5207b66a0805dcec17a7a5807eced95"} Jan 30 00:16:00 crc kubenswrapper[5113]: I0130 00:16:00.289603 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zvvkv"] Jan 30 00:16:00 crc kubenswrapper[5113]: I0130 00:16:00.938020 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xwlmk"] Jan 30 00:16:00 crc kubenswrapper[5113]: I0130 00:16:00.950272 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xwlmk"] Jan 30 00:16:00 crc kubenswrapper[5113]: I0130 00:16:00.950468 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:00 crc kubenswrapper[5113]: I0130 00:16:00.953600 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"community-operators-dockercfg-vrd5f\"" Jan 30 00:16:00 crc kubenswrapper[5113]: I0130 00:16:00.981213 5113 generic.go:358] "Generic (PLEG): container finished" podID="95aea721-4924-4c1a-9a22-8e078dbd1a05" containerID="1cf34b189e4ffa4dabf72bec59eef076583c79dab05fcb35de831f4e5ba292f9" exitCode=0 Jan 30 00:16:00 crc kubenswrapper[5113]: I0130 00:16:00.981219 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zvvkv" event={"ID":"95aea721-4924-4c1a-9a22-8e078dbd1a05","Type":"ContainerDied","Data":"1cf34b189e4ffa4dabf72bec59eef076583c79dab05fcb35de831f4e5ba292f9"} Jan 30 00:16:00 crc kubenswrapper[5113]: I0130 00:16:00.981423 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zvvkv" event={"ID":"95aea721-4924-4c1a-9a22-8e078dbd1a05","Type":"ContainerStarted","Data":"008a9d2b5625f584b638aab4cfc8e55ea449f8e11bcf94cd5389353b524a7452"} Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.072296 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f73f040-c591-4342-a2b3-cfa77b826069-catalog-content\") pod \"community-operators-xwlmk\" (UID: \"3f73f040-c591-4342-a2b3-cfa77b826069\") " pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.072366 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f73f040-c591-4342-a2b3-cfa77b826069-utilities\") pod \"community-operators-xwlmk\" (UID: \"3f73f040-c591-4342-a2b3-cfa77b826069\") " pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.072590 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbj9w\" (UniqueName: \"kubernetes.io/projected/3f73f040-c591-4342-a2b3-cfa77b826069-kube-api-access-jbj9w\") pod \"community-operators-xwlmk\" (UID: \"3f73f040-c591-4342-a2b3-cfa77b826069\") " pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.174464 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f73f040-c591-4342-a2b3-cfa77b826069-catalog-content\") pod \"community-operators-xwlmk\" (UID: \"3f73f040-c591-4342-a2b3-cfa77b826069\") " pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.174533 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f73f040-c591-4342-a2b3-cfa77b826069-utilities\") pod \"community-operators-xwlmk\" (UID: \"3f73f040-c591-4342-a2b3-cfa77b826069\") " pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.174611 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-jbj9w\" (UniqueName: \"kubernetes.io/projected/3f73f040-c591-4342-a2b3-cfa77b826069-kube-api-access-jbj9w\") pod \"community-operators-xwlmk\" (UID: 
\"3f73f040-c591-4342-a2b3-cfa77b826069\") " pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.175139 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3f73f040-c591-4342-a2b3-cfa77b826069-catalog-content\") pod \"community-operators-xwlmk\" (UID: \"3f73f040-c591-4342-a2b3-cfa77b826069\") " pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.175276 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3f73f040-c591-4342-a2b3-cfa77b826069-utilities\") pod \"community-operators-xwlmk\" (UID: \"3f73f040-c591-4342-a2b3-cfa77b826069\") " pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.203733 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbj9w\" (UniqueName: \"kubernetes.io/projected/3f73f040-c591-4342-a2b3-cfa77b826069-kube-api-access-jbj9w\") pod \"community-operators-xwlmk\" (UID: \"3f73f040-c591-4342-a2b3-cfa77b826069\") " pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.238111 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-5d9d95bf5b-7xj6k"] Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.249103 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.256345 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-5d9d95bf5b-7xj6k"] Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.273708 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.376589 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d00efc18-d559-4d4e-974d-a8dae87a948a-registry-tls\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.376650 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d00efc18-d559-4d4e-974d-a8dae87a948a-ca-trust-extracted\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.376677 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d00efc18-d559-4d4e-974d-a8dae87a948a-bound-sa-token\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.376722 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d00efc18-d559-4d4e-974d-a8dae87a948a-installation-pull-secrets\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.376775 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.376809 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d00efc18-d559-4d4e-974d-a8dae87a948a-trusted-ca\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.376832 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d00efc18-d559-4d4e-974d-a8dae87a948a-registry-certificates\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.376852 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5q92\" (UniqueName: \"kubernetes.io/projected/d00efc18-d559-4d4e-974d-a8dae87a948a-kube-api-access-r5q92\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " 
pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.418001 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.478045 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d00efc18-d559-4d4e-974d-a8dae87a948a-installation-pull-secrets\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.478140 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d00efc18-d559-4d4e-974d-a8dae87a948a-trusted-ca\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.478162 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d00efc18-d559-4d4e-974d-a8dae87a948a-registry-certificates\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.478180 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-r5q92\" (UniqueName: \"kubernetes.io/projected/d00efc18-d559-4d4e-974d-a8dae87a948a-kube-api-access-r5q92\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.478221 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d00efc18-d559-4d4e-974d-a8dae87a948a-registry-tls\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.478242 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d00efc18-d559-4d4e-974d-a8dae87a948a-ca-trust-extracted\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.478261 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d00efc18-d559-4d4e-974d-a8dae87a948a-bound-sa-token\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.480251 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for 
volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d00efc18-d559-4d4e-974d-a8dae87a948a-ca-trust-extracted\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.481137 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d00efc18-d559-4d4e-974d-a8dae87a948a-trusted-ca\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.481269 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d00efc18-d559-4d4e-974d-a8dae87a948a-registry-certificates\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.486625 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d00efc18-d559-4d4e-974d-a8dae87a948a-registry-tls\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.487127 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d00efc18-d559-4d4e-974d-a8dae87a948a-installation-pull-secrets\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.501599 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d00efc18-d559-4d4e-974d-a8dae87a948a-bound-sa-token\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.502961 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5q92\" (UniqueName: \"kubernetes.io/projected/d00efc18-d559-4d4e-974d-a8dae87a948a-kube-api-access-r5q92\") pod \"image-registry-5d9d95bf5b-7xj6k\" (UID: \"d00efc18-d559-4d4e-974d-a8dae87a948a\") " pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.652673 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.715472 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xwlmk"] Jan 30 00:16:01 crc kubenswrapper[5113]: W0130 00:16:01.721407 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f73f040_c591_4342_a2b3_cfa77b826069.slice/crio-165126417c1fb6a465568e80151433b1da1bf1de0c61858d5ec8fc3c7237d3dd WatchSource:0}: Error finding container 165126417c1fb6a465568e80151433b1da1bf1de0c61858d5ec8fc3c7237d3dd: Status 404 returned error can't find the container with id 165126417c1fb6a465568e80151433b1da1bf1de0c61858d5ec8fc3c7237d3dd Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.917855 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-22l6b"] Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.924426 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.928242 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"certified-operators-dockercfg-7cl8d\"" Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.936781 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-22l6b"] Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.992424 5113 generic.go:358] "Generic (PLEG): container finished" podID="909f168f-fd5f-4b75-909e-514deaea8397" containerID="a026ac1a933f3fb229679c65180ee62adaedf95cb8ea135ade935daa2ca9cc87" exitCode=0 Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.992616 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zw2z8" event={"ID":"909f168f-fd5f-4b75-909e-514deaea8397","Type":"ContainerDied","Data":"a026ac1a933f3fb229679c65180ee62adaedf95cb8ea135ade935daa2ca9cc87"} Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.994230 5113 generic.go:358] "Generic (PLEG): container finished" podID="3f73f040-c591-4342-a2b3-cfa77b826069" containerID="d0240b314f9447be480ba885c6c7f95c022e3e16da3363d78966e110d28660ec" exitCode=0 Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.994455 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xwlmk" event={"ID":"3f73f040-c591-4342-a2b3-cfa77b826069","Type":"ContainerDied","Data":"d0240b314f9447be480ba885c6c7f95c022e3e16da3363d78966e110d28660ec"} Jan 30 00:16:01 crc kubenswrapper[5113]: I0130 00:16:01.994490 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xwlmk" event={"ID":"3f73f040-c591-4342-a2b3-cfa77b826069","Type":"ContainerStarted","Data":"165126417c1fb6a465568e80151433b1da1bf1de0c61858d5ec8fc3c7237d3dd"} Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.089048 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6q6g2\" (UniqueName: \"kubernetes.io/projected/ca96fb3e-7dc2-4095-a77a-7125b34d5804-kube-api-access-6q6g2\") pod \"certified-operators-22l6b\" (UID: \"ca96fb3e-7dc2-4095-a77a-7125b34d5804\") " pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.089120 5113 kubelet.go:2544] "SyncLoop UPDATE" 
source="api" pods=["openshift-image-registry/image-registry-5d9d95bf5b-7xj6k"] Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.089198 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca96fb3e-7dc2-4095-a77a-7125b34d5804-catalog-content\") pod \"certified-operators-22l6b\" (UID: \"ca96fb3e-7dc2-4095-a77a-7125b34d5804\") " pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.089277 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca96fb3e-7dc2-4095-a77a-7125b34d5804-utilities\") pod \"certified-operators-22l6b\" (UID: \"ca96fb3e-7dc2-4095-a77a-7125b34d5804\") " pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.190946 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-6q6g2\" (UniqueName: \"kubernetes.io/projected/ca96fb3e-7dc2-4095-a77a-7125b34d5804-kube-api-access-6q6g2\") pod \"certified-operators-22l6b\" (UID: \"ca96fb3e-7dc2-4095-a77a-7125b34d5804\") " pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.191025 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca96fb3e-7dc2-4095-a77a-7125b34d5804-catalog-content\") pod \"certified-operators-22l6b\" (UID: \"ca96fb3e-7dc2-4095-a77a-7125b34d5804\") " pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.191067 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca96fb3e-7dc2-4095-a77a-7125b34d5804-utilities\") pod \"certified-operators-22l6b\" (UID: \"ca96fb3e-7dc2-4095-a77a-7125b34d5804\") " pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.191446 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ca96fb3e-7dc2-4095-a77a-7125b34d5804-utilities\") pod \"certified-operators-22l6b\" (UID: \"ca96fb3e-7dc2-4095-a77a-7125b34d5804\") " pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.191774 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ca96fb3e-7dc2-4095-a77a-7125b34d5804-catalog-content\") pod \"certified-operators-22l6b\" (UID: \"ca96fb3e-7dc2-4095-a77a-7125b34d5804\") " pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.212049 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-6q6g2\" (UniqueName: \"kubernetes.io/projected/ca96fb3e-7dc2-4095-a77a-7125b34d5804-kube-api-access-6q6g2\") pod \"certified-operators-22l6b\" (UID: \"ca96fb3e-7dc2-4095-a77a-7125b34d5804\") " pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.278489 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:02 crc kubenswrapper[5113]: I0130 00:16:02.794804 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-22l6b"] Jan 30 00:16:03 crc kubenswrapper[5113]: I0130 00:16:03.004657 5113 generic.go:358] "Generic (PLEG): container finished" podID="ca96fb3e-7dc2-4095-a77a-7125b34d5804" containerID="4907769fe85bd13db79306c0eb822e8a7828871601c198a9cd864e0b44445192" exitCode=0 Jan 30 00:16:03 crc kubenswrapper[5113]: I0130 00:16:03.004776 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22l6b" event={"ID":"ca96fb3e-7dc2-4095-a77a-7125b34d5804","Type":"ContainerDied","Data":"4907769fe85bd13db79306c0eb822e8a7828871601c198a9cd864e0b44445192"} Jan 30 00:16:03 crc kubenswrapper[5113]: I0130 00:16:03.004843 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22l6b" event={"ID":"ca96fb3e-7dc2-4095-a77a-7125b34d5804","Type":"ContainerStarted","Data":"80122b7636151892826a3096489f6bee661db917e84d45a90c0c7790ce42925c"} Jan 30 00:16:03 crc kubenswrapper[5113]: I0130 00:16:03.008173 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" event={"ID":"d00efc18-d559-4d4e-974d-a8dae87a948a","Type":"ContainerStarted","Data":"2ff777cdeb9fd0752e62a5e7995815e581a4d775e2f86f1927fed1ae94816855"} Jan 30 00:16:03 crc kubenswrapper[5113]: I0130 00:16:03.008197 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" event={"ID":"d00efc18-d559-4d4e-974d-a8dae87a948a","Type":"ContainerStarted","Data":"45b73b8c45f706902a2e8c8c10b5b28927e5c17767a6a55a66becb01816efc3d"} Jan 30 00:16:03 crc kubenswrapper[5113]: I0130 00:16:03.008302 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:03 crc kubenswrapper[5113]: I0130 00:16:03.012848 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zw2z8" event={"ID":"909f168f-fd5f-4b75-909e-514deaea8397","Type":"ContainerStarted","Data":"0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b"} Jan 30 00:16:03 crc kubenswrapper[5113]: I0130 00:16:03.016616 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xwlmk" event={"ID":"3f73f040-c591-4342-a2b3-cfa77b826069","Type":"ContainerStarted","Data":"1f36f124607f323f391aa9a3d71fd444ba7bb328b0f67d689d50f30f6fd51b3e"} Jan 30 00:16:03 crc kubenswrapper[5113]: I0130 00:16:03.018698 5113 generic.go:358] "Generic (PLEG): container finished" podID="95aea721-4924-4c1a-9a22-8e078dbd1a05" containerID="192fcfea83ab2fa31ba21ef6f3410dcdb1bf2d29484f37f7408c49231ca417dc" exitCode=0 Jan 30 00:16:03 crc kubenswrapper[5113]: I0130 00:16:03.018781 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zvvkv" event={"ID":"95aea721-4924-4c1a-9a22-8e078dbd1a05","Type":"ContainerDied","Data":"192fcfea83ab2fa31ba21ef6f3410dcdb1bf2d29484f37f7408c49231ca417dc"} Jan 30 00:16:03 crc kubenswrapper[5113]: I0130 00:16:03.124300 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" podStartSLOduration=2.1242770970000002 podStartE2EDuration="2.124277097s" podCreationTimestamp="2026-01-30 
00:16:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:16:03.09884794 +0000 UTC m=+383.171453317" watchObservedRunningTime="2026-01-30 00:16:03.124277097 +0000 UTC m=+383.196882474" Jan 30 00:16:04 crc kubenswrapper[5113]: I0130 00:16:04.029203 5113 generic.go:358] "Generic (PLEG): container finished" podID="3f73f040-c591-4342-a2b3-cfa77b826069" containerID="1f36f124607f323f391aa9a3d71fd444ba7bb328b0f67d689d50f30f6fd51b3e" exitCode=0 Jan 30 00:16:04 crc kubenswrapper[5113]: I0130 00:16:04.029337 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xwlmk" event={"ID":"3f73f040-c591-4342-a2b3-cfa77b826069","Type":"ContainerDied","Data":"1f36f124607f323f391aa9a3d71fd444ba7bb328b0f67d689d50f30f6fd51b3e"} Jan 30 00:16:04 crc kubenswrapper[5113]: I0130 00:16:04.033789 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zvvkv" event={"ID":"95aea721-4924-4c1a-9a22-8e078dbd1a05","Type":"ContainerStarted","Data":"ca565efc2c23adb4dc9ba18fe4d74809a6b363fe8df218cfefcc1f27cdf28312"} Jan 30 00:16:04 crc kubenswrapper[5113]: I0130 00:16:04.054644 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-zw2z8" podStartSLOduration=4.854495235 podStartE2EDuration="6.054588899s" podCreationTimestamp="2026-01-30 00:15:58 +0000 UTC" firstStartedPulling="2026-01-30 00:15:59.967223348 +0000 UTC m=+380.039828735" lastFinishedPulling="2026-01-30 00:16:01.167317012 +0000 UTC m=+381.239922399" observedRunningTime="2026-01-30 00:16:03.130583562 +0000 UTC m=+383.203188949" watchObservedRunningTime="2026-01-30 00:16:04.054588899 +0000 UTC m=+384.127194286" Jan 30 00:16:04 crc kubenswrapper[5113]: I0130 00:16:04.079220 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zvvkv" podStartSLOduration=4.181021683 podStartE2EDuration="5.07919486s" podCreationTimestamp="2026-01-30 00:15:59 +0000 UTC" firstStartedPulling="2026-01-30 00:16:00.982958714 +0000 UTC m=+381.055564101" lastFinishedPulling="2026-01-30 00:16:01.881131901 +0000 UTC m=+381.953737278" observedRunningTime="2026-01-30 00:16:04.072978048 +0000 UTC m=+384.145583465" watchObservedRunningTime="2026-01-30 00:16:04.07919486 +0000 UTC m=+384.151800277" Jan 30 00:16:05 crc kubenswrapper[5113]: I0130 00:16:05.063036 5113 generic.go:358] "Generic (PLEG): container finished" podID="ca96fb3e-7dc2-4095-a77a-7125b34d5804" containerID="9b738f71333a8202945e220a6a7f6a3ade39882a375a4f9fce9cd9f098d96934" exitCode=0 Jan 30 00:16:05 crc kubenswrapper[5113]: I0130 00:16:05.063647 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22l6b" event={"ID":"ca96fb3e-7dc2-4095-a77a-7125b34d5804","Type":"ContainerDied","Data":"9b738f71333a8202945e220a6a7f6a3ade39882a375a4f9fce9cd9f098d96934"} Jan 30 00:16:05 crc kubenswrapper[5113]: I0130 00:16:05.068294 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xwlmk" event={"ID":"3f73f040-c591-4342-a2b3-cfa77b826069","Type":"ContainerStarted","Data":"a3fa4b2e42e77c13c7d0a0b9f1b4b46616b266cf1d2af5d424dad997bf5ce4f3"} Jan 30 00:16:05 crc kubenswrapper[5113]: I0130 00:16:05.108593 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xwlmk" 
podStartSLOduration=4.338766576 podStartE2EDuration="5.108566949s" podCreationTimestamp="2026-01-30 00:16:00 +0000 UTC" firstStartedPulling="2026-01-30 00:16:01.995991556 +0000 UTC m=+382.068596933" lastFinishedPulling="2026-01-30 00:16:02.765791929 +0000 UTC m=+382.838397306" observedRunningTime="2026-01-30 00:16:05.1047387 +0000 UTC m=+385.177344097" watchObservedRunningTime="2026-01-30 00:16:05.108566949 +0000 UTC m=+385.181172326" Jan 30 00:16:06 crc kubenswrapper[5113]: I0130 00:16:06.079948 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-22l6b" event={"ID":"ca96fb3e-7dc2-4095-a77a-7125b34d5804","Type":"ContainerStarted","Data":"d17c50055bf307f10423b572fd2cbf95efb2dae1bba2f9c268a11fcca6ccf7ec"} Jan 30 00:16:06 crc kubenswrapper[5113]: I0130 00:16:06.106083 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-22l6b" podStartSLOduration=3.859596921 podStartE2EDuration="5.106046459s" podCreationTimestamp="2026-01-30 00:16:01 +0000 UTC" firstStartedPulling="2026-01-30 00:16:03.005870942 +0000 UTC m=+383.078476319" lastFinishedPulling="2026-01-30 00:16:04.25232048 +0000 UTC m=+384.324925857" observedRunningTime="2026-01-30 00:16:06.098599049 +0000 UTC m=+386.171204426" watchObservedRunningTime="2026-01-30 00:16:06.106046459 +0000 UTC m=+386.178651836" Jan 30 00:16:08 crc kubenswrapper[5113]: I0130 00:16:08.865698 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:16:08 crc kubenswrapper[5113]: I0130 00:16:08.866235 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:16:08 crc kubenswrapper[5113]: I0130 00:16:08.919378 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:16:09 crc kubenswrapper[5113]: I0130 00:16:09.147814 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-zw2z8" Jan 30 00:16:09 crc kubenswrapper[5113]: I0130 00:16:09.867910 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:16:09 crc kubenswrapper[5113]: I0130 00:16:09.868680 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:16:09 crc kubenswrapper[5113]: I0130 00:16:09.935440 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:16:10 crc kubenswrapper[5113]: I0130 00:16:10.171001 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zvvkv" Jan 30 00:16:11 crc kubenswrapper[5113]: I0130 00:16:11.274631 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:11 crc kubenswrapper[5113]: I0130 00:16:11.274705 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:11 crc kubenswrapper[5113]: I0130 00:16:11.336847 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:12 crc kubenswrapper[5113]: I0130 
00:16:12.194481 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xwlmk" Jan 30 00:16:12 crc kubenswrapper[5113]: I0130 00:16:12.279409 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:12 crc kubenswrapper[5113]: I0130 00:16:12.279476 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:12 crc kubenswrapper[5113]: I0130 00:16:12.331534 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:13 crc kubenswrapper[5113]: I0130 00:16:13.182677 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-22l6b" Jan 30 00:16:21 crc kubenswrapper[5113]: I0130 00:16:21.195361 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:16:21 crc kubenswrapper[5113]: I0130 00:16:21.196389 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:16:24 crc kubenswrapper[5113]: I0130 00:16:24.040185 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-5d9d95bf5b-7xj6k" Jan 30 00:16:24 crc kubenswrapper[5113]: I0130 00:16:24.098549 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-bnj47"] Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.142670 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" podUID="de29c822-8061-4f04-9a8a-b36f6ab0082e" containerName="registry" containerID="cri-o://fc8a644997413f585441a09ac509df0c34e78409a4372ce967ef4a6cbe8cd030" gracePeriod=30 Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.428789 5113 generic.go:358] "Generic (PLEG): container finished" podID="de29c822-8061-4f04-9a8a-b36f6ab0082e" containerID="fc8a644997413f585441a09ac509df0c34e78409a4372ce967ef4a6cbe8cd030" exitCode=0 Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.428925 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" event={"ID":"de29c822-8061-4f04-9a8a-b36f6ab0082e","Type":"ContainerDied","Data":"fc8a644997413f585441a09ac509df0c34e78409a4372ce967ef4a6cbe8cd030"} Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.648078 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.835389 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-tls\") pod \"de29c822-8061-4f04-9a8a-b36f6ab0082e\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.836183 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2\") pod \"de29c822-8061-4f04-9a8a-b36f6ab0082e\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.836309 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/de29c822-8061-4f04-9a8a-b36f6ab0082e-installation-pull-secrets\") pod \"de29c822-8061-4f04-9a8a-b36f6ab0082e\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.836482 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-trusted-ca\") pod \"de29c822-8061-4f04-9a8a-b36f6ab0082e\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.837320 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/de29c822-8061-4f04-9a8a-b36f6ab0082e-ca-trust-extracted\") pod \"de29c822-8061-4f04-9a8a-b36f6ab0082e\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.837510 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-certificates\") pod \"de29c822-8061-4f04-9a8a-b36f6ab0082e\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.837614 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-bound-sa-token\") pod \"de29c822-8061-4f04-9a8a-b36f6ab0082e\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.837722 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "de29c822-8061-4f04-9a8a-b36f6ab0082e" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.837743 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9g2k\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-kube-api-access-c9g2k\") pod \"de29c822-8061-4f04-9a8a-b36f6ab0082e\" (UID: \"de29c822-8061-4f04-9a8a-b36f6ab0082e\") " Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.838579 5113 reconciler_common.go:299] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.839061 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "de29c822-8061-4f04-9a8a-b36f6ab0082e" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.843950 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "de29c822-8061-4f04-9a8a-b36f6ab0082e" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.844706 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de29c822-8061-4f04-9a8a-b36f6ab0082e-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "de29c822-8061-4f04-9a8a-b36f6ab0082e" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.850161 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "de29c822-8061-4f04-9a8a-b36f6ab0082e" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.850387 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2" (OuterVolumeSpecName: "registry-storage") pod "de29c822-8061-4f04-9a8a-b36f6ab0082e" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e"). InnerVolumeSpecName "pvc-b21f41aa-58d4-44b1-aeaa-280a8e32ddf2". PluginName "kubernetes.io/csi", VolumeGIDValue "" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.850800 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-kube-api-access-c9g2k" (OuterVolumeSpecName: "kube-api-access-c9g2k") pod "de29c822-8061-4f04-9a8a-b36f6ab0082e" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e"). InnerVolumeSpecName "kube-api-access-c9g2k". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.877138 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de29c822-8061-4f04-9a8a-b36f6ab0082e-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "de29c822-8061-4f04-9a8a-b36f6ab0082e" (UID: "de29c822-8061-4f04-9a8a-b36f6ab0082e"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.940145 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-c9g2k\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-kube-api-access-c9g2k\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.940190 5113 reconciler_common.go:299] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.940202 5113 reconciler_common.go:299] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/de29c822-8061-4f04-9a8a-b36f6ab0082e-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.940211 5113 reconciler_common.go:299] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/de29c822-8061-4f04-9a8a-b36f6ab0082e-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.940219 5113 reconciler_common.go:299] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/de29c822-8061-4f04-9a8a-b36f6ab0082e-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:49 crc kubenswrapper[5113]: I0130 00:16:49.940227 5113 reconciler_common.go:299] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/de29c822-8061-4f04-9a8a-b36f6ab0082e-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 30 00:16:50 crc kubenswrapper[5113]: I0130 00:16:50.438338 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" event={"ID":"de29c822-8061-4f04-9a8a-b36f6ab0082e","Type":"ContainerDied","Data":"714adcdce307ec70da1452a29a6a4efa4b772a48837822463ffb51f5fd1e91b2"} Jan 30 00:16:50 crc kubenswrapper[5113]: I0130 00:16:50.438419 5113 scope.go:117] "RemoveContainer" containerID="fc8a644997413f585441a09ac509df0c34e78409a4372ce967ef4a6cbe8cd030" Jan 30 00:16:50 crc kubenswrapper[5113]: I0130 00:16:50.438511 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66587d64c8-bnj47" Jan 30 00:16:50 crc kubenswrapper[5113]: I0130 00:16:50.493724 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-bnj47"] Jan 30 00:16:50 crc kubenswrapper[5113]: I0130 00:16:50.496580 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-66587d64c8-bnj47"] Jan 30 00:16:50 crc kubenswrapper[5113]: I0130 00:16:50.787839 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de29c822-8061-4f04-9a8a-b36f6ab0082e" path="/var/lib/kubelet/pods/de29c822-8061-4f04-9a8a-b36f6ab0082e/volumes" Jan 30 00:16:51 crc kubenswrapper[5113]: I0130 00:16:51.196131 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:16:51 crc kubenswrapper[5113]: I0130 00:16:51.196279 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:17:21 crc kubenswrapper[5113]: I0130 00:17:21.196385 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:17:21 crc kubenswrapper[5113]: I0130 00:17:21.199738 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:17:21 crc kubenswrapper[5113]: I0130 00:17:21.200434 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:17:21 crc kubenswrapper[5113]: I0130 00:17:21.201463 5113 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d2c027b2bad57e7da7ce244d6807304cf78d526f9fcc9b86a42e7865e2bae0e7"} pod="openshift-machine-config-operator/machine-config-daemon-gxph5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 00:17:21 crc kubenswrapper[5113]: I0130 00:17:21.201708 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" containerID="cri-o://d2c027b2bad57e7da7ce244d6807304cf78d526f9fcc9b86a42e7865e2bae0e7" gracePeriod=600 Jan 30 00:17:21 crc kubenswrapper[5113]: I0130 00:17:21.680201 5113 generic.go:358] "Generic (PLEG): container finished" podID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerID="d2c027b2bad57e7da7ce244d6807304cf78d526f9fcc9b86a42e7865e2bae0e7" exitCode=0 Jan 30 00:17:21 crc kubenswrapper[5113]: I0130 00:17:21.680326 5113 
kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerDied","Data":"d2c027b2bad57e7da7ce244d6807304cf78d526f9fcc9b86a42e7865e2bae0e7"} Jan 30 00:17:21 crc kubenswrapper[5113]: I0130 00:17:21.680818 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerStarted","Data":"d8d25d4044f296bffb6b6885c2a6830e6967a1638a903216ca33f356c73951bf"} Jan 30 00:17:21 crc kubenswrapper[5113]: I0130 00:17:21.680852 5113 scope.go:117] "RemoveContainer" containerID="3448c37a3f80491c2d3cfa4d86f18abd9731d5d8a7722c07abbbacc4c6189249" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.150309 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495538-dkktd"] Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.153469 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="de29c822-8061-4f04-9a8a-b36f6ab0082e" containerName="registry" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.153806 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="de29c822-8061-4f04-9a8a-b36f6ab0082e" containerName="registry" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.154024 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="de29c822-8061-4f04-9a8a-b36f6ab0082e" containerName="registry" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.159687 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495538-dkktd" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.161106 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495538-dkktd"] Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.167054 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\"" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.167224 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\"" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.167349 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-kshml\"" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.259684 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r59tt\" (UniqueName: \"kubernetes.io/projected/9044cac0-e686-4518-8879-3082b7842df6-kube-api-access-r59tt\") pod \"auto-csr-approver-29495538-dkktd\" (UID: \"9044cac0-e686-4518-8879-3082b7842df6\") " pod="openshift-infra/auto-csr-approver-29495538-dkktd" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.361739 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-r59tt\" (UniqueName: \"kubernetes.io/projected/9044cac0-e686-4518-8879-3082b7842df6-kube-api-access-r59tt\") pod \"auto-csr-approver-29495538-dkktd\" (UID: \"9044cac0-e686-4518-8879-3082b7842df6\") " pod="openshift-infra/auto-csr-approver-29495538-dkktd" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.401116 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-r59tt\" (UniqueName: 
\"kubernetes.io/projected/9044cac0-e686-4518-8879-3082b7842df6-kube-api-access-r59tt\") pod \"auto-csr-approver-29495538-dkktd\" (UID: \"9044cac0-e686-4518-8879-3082b7842df6\") " pod="openshift-infra/auto-csr-approver-29495538-dkktd" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.483765 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495538-dkktd" Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.968788 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495538-dkktd"] Jan 30 00:18:00 crc kubenswrapper[5113]: I0130 00:18:00.995572 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495538-dkktd" event={"ID":"9044cac0-e686-4518-8879-3082b7842df6","Type":"ContainerStarted","Data":"7b6d4d115e8eddd3b00dd5c4ccd4efb8d11c0954427df927f52f6faf829db371"} Jan 30 00:18:04 crc kubenswrapper[5113]: I0130 00:18:04.864452 5113 csr.go:274] "Certificate signing request is approved, waiting to be issued" logger="kubernetes.io/kubelet-serving" csr="csr-4hvbf" Jan 30 00:18:04 crc kubenswrapper[5113]: I0130 00:18:04.888499 5113 csr.go:270] "Certificate signing request is issued" logger="kubernetes.io/kubelet-serving" csr="csr-4hvbf" Jan 30 00:18:05 crc kubenswrapper[5113]: I0130 00:18:05.022341 5113 generic.go:358] "Generic (PLEG): container finished" podID="9044cac0-e686-4518-8879-3082b7842df6" containerID="53347a45a6019c6232fb5f7a7ce5f43115e2d60235df398e2567b1a988e926da" exitCode=0 Jan 30 00:18:05 crc kubenswrapper[5113]: I0130 00:18:05.022514 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495538-dkktd" event={"ID":"9044cac0-e686-4518-8879-3082b7842df6","Type":"ContainerDied","Data":"53347a45a6019c6232fb5f7a7ce5f43115e2d60235df398e2567b1a988e926da"} Jan 30 00:18:05 crc kubenswrapper[5113]: I0130 00:18:05.890347 5113 certificate_manager.go:715] "Certificate rotation deadline determined" logger="kubernetes.io/kubelet-serving" expiration="2026-03-01 00:13:04 +0000 UTC" deadline="2026-02-23 07:16:33.17048294 +0000 UTC" Jan 30 00:18:05 crc kubenswrapper[5113]: I0130 00:18:05.891287 5113 certificate_manager.go:431] "Waiting for next certificate rotation" logger="kubernetes.io/kubelet-serving" sleep="582h58m27.279204939s" Jan 30 00:18:06 crc kubenswrapper[5113]: I0130 00:18:06.274361 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495538-dkktd" Jan 30 00:18:06 crc kubenswrapper[5113]: I0130 00:18:06.354275 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r59tt\" (UniqueName: \"kubernetes.io/projected/9044cac0-e686-4518-8879-3082b7842df6-kube-api-access-r59tt\") pod \"9044cac0-e686-4518-8879-3082b7842df6\" (UID: \"9044cac0-e686-4518-8879-3082b7842df6\") " Jan 30 00:18:06 crc kubenswrapper[5113]: I0130 00:18:06.362798 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9044cac0-e686-4518-8879-3082b7842df6-kube-api-access-r59tt" (OuterVolumeSpecName: "kube-api-access-r59tt") pod "9044cac0-e686-4518-8879-3082b7842df6" (UID: "9044cac0-e686-4518-8879-3082b7842df6"). InnerVolumeSpecName "kube-api-access-r59tt". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:18:06 crc kubenswrapper[5113]: I0130 00:18:06.455666 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-r59tt\" (UniqueName: \"kubernetes.io/projected/9044cac0-e686-4518-8879-3082b7842df6-kube-api-access-r59tt\") on node \"crc\" DevicePath \"\"" Jan 30 00:18:06 crc kubenswrapper[5113]: I0130 00:18:06.892262 5113 certificate_manager.go:715] "Certificate rotation deadline determined" logger="kubernetes.io/kubelet-serving" expiration="2026-03-01 00:13:04 +0000 UTC" deadline="2026-02-24 07:06:19.111475013 +0000 UTC" Jan 30 00:18:06 crc kubenswrapper[5113]: I0130 00:18:06.892317 5113 certificate_manager.go:431] "Waiting for next certificate rotation" logger="kubernetes.io/kubelet-serving" sleep="606h48m12.219162651s" Jan 30 00:18:07 crc kubenswrapper[5113]: I0130 00:18:07.038538 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495538-dkktd" event={"ID":"9044cac0-e686-4518-8879-3082b7842df6","Type":"ContainerDied","Data":"7b6d4d115e8eddd3b00dd5c4ccd4efb8d11c0954427df927f52f6faf829db371"} Jan 30 00:18:07 crc kubenswrapper[5113]: I0130 00:18:07.038625 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b6d4d115e8eddd3b00dd5c4ccd4efb8d11c0954427df927f52f6faf829db371" Jan 30 00:18:07 crc kubenswrapper[5113]: I0130 00:18:07.038687 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495538-dkktd" Jan 30 00:19:21 crc kubenswrapper[5113]: I0130 00:19:21.195862 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:19:21 crc kubenswrapper[5113]: I0130 00:19:21.196850 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:19:41 crc kubenswrapper[5113]: I0130 00:19:41.004577 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log" Jan 30 00:19:41 crc kubenswrapper[5113]: I0130 00:19:41.007210 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log" Jan 30 00:19:41 crc kubenswrapper[5113]: I0130 00:19:41.061103 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log" Jan 30 00:19:41 crc kubenswrapper[5113]: I0130 00:19:41.061252 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log" Jan 30 00:19:51 crc kubenswrapper[5113]: I0130 00:19:51.195544 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:19:51 crc kubenswrapper[5113]: I0130 00:19:51.196501 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.145196 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495540-hbbvk"] Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.147447 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="9044cac0-e686-4518-8879-3082b7842df6" containerName="oc" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.147486 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="9044cac0-e686-4518-8879-3082b7842df6" containerName="oc" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.147660 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="9044cac0-e686-4518-8879-3082b7842df6" containerName="oc" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.157416 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495540-hbbvk"] Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.157628 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495540-hbbvk" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.165074 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\"" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.165468 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-kshml\"" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.165729 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\"" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.307584 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j67r8\" (UniqueName: \"kubernetes.io/projected/41ff0f59-4052-4862-aa77-23c8f170082b-kube-api-access-j67r8\") pod \"auto-csr-approver-29495540-hbbvk\" (UID: \"41ff0f59-4052-4862-aa77-23c8f170082b\") " pod="openshift-infra/auto-csr-approver-29495540-hbbvk" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.409179 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-j67r8\" (UniqueName: \"kubernetes.io/projected/41ff0f59-4052-4862-aa77-23c8f170082b-kube-api-access-j67r8\") pod \"auto-csr-approver-29495540-hbbvk\" (UID: \"41ff0f59-4052-4862-aa77-23c8f170082b\") " pod="openshift-infra/auto-csr-approver-29495540-hbbvk" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.433522 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-j67r8\" (UniqueName: \"kubernetes.io/projected/41ff0f59-4052-4862-aa77-23c8f170082b-kube-api-access-j67r8\") pod \"auto-csr-approver-29495540-hbbvk\" (UID: \"41ff0f59-4052-4862-aa77-23c8f170082b\") " pod="openshift-infra/auto-csr-approver-29495540-hbbvk" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.511190 5113 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495540-hbbvk" Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.738135 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495540-hbbvk"] Jan 30 00:20:00 crc kubenswrapper[5113]: I0130 00:20:00.880944 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495540-hbbvk" event={"ID":"41ff0f59-4052-4862-aa77-23c8f170082b","Type":"ContainerStarted","Data":"e4e58fa69283df9e7de339bbbcb62fd5063dabdb5251e72b84127add55f44621"} Jan 30 00:20:02 crc kubenswrapper[5113]: I0130 00:20:02.895721 5113 generic.go:358] "Generic (PLEG): container finished" podID="41ff0f59-4052-4862-aa77-23c8f170082b" containerID="0769b7379741b2af5c39c3380c9e5f271428b6267e7671fc02902fb1fc451d88" exitCode=0 Jan 30 00:20:02 crc kubenswrapper[5113]: I0130 00:20:02.895835 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495540-hbbvk" event={"ID":"41ff0f59-4052-4862-aa77-23c8f170082b","Type":"ContainerDied","Data":"0769b7379741b2af5c39c3380c9e5f271428b6267e7671fc02902fb1fc451d88"} Jan 30 00:20:04 crc kubenswrapper[5113]: I0130 00:20:04.215819 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495540-hbbvk" Jan 30 00:20:04 crc kubenswrapper[5113]: I0130 00:20:04.381243 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j67r8\" (UniqueName: \"kubernetes.io/projected/41ff0f59-4052-4862-aa77-23c8f170082b-kube-api-access-j67r8\") pod \"41ff0f59-4052-4862-aa77-23c8f170082b\" (UID: \"41ff0f59-4052-4862-aa77-23c8f170082b\") " Jan 30 00:20:04 crc kubenswrapper[5113]: I0130 00:20:04.393139 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41ff0f59-4052-4862-aa77-23c8f170082b-kube-api-access-j67r8" (OuterVolumeSpecName: "kube-api-access-j67r8") pod "41ff0f59-4052-4862-aa77-23c8f170082b" (UID: "41ff0f59-4052-4862-aa77-23c8f170082b"). InnerVolumeSpecName "kube-api-access-j67r8". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:20:04 crc kubenswrapper[5113]: I0130 00:20:04.483341 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-j67r8\" (UniqueName: \"kubernetes.io/projected/41ff0f59-4052-4862-aa77-23c8f170082b-kube-api-access-j67r8\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:04 crc kubenswrapper[5113]: I0130 00:20:04.913909 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495540-hbbvk" Jan 30 00:20:04 crc kubenswrapper[5113]: I0130 00:20:04.913926 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495540-hbbvk" event={"ID":"41ff0f59-4052-4862-aa77-23c8f170082b","Type":"ContainerDied","Data":"e4e58fa69283df9e7de339bbbcb62fd5063dabdb5251e72b84127add55f44621"} Jan 30 00:20:04 crc kubenswrapper[5113]: I0130 00:20:04.914463 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4e58fa69283df9e7de339bbbcb62fd5063dabdb5251e72b84127add55f44621" Jan 30 00:20:21 crc kubenswrapper[5113]: I0130 00:20:21.195289 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:20:21 crc kubenswrapper[5113]: I0130 00:20:21.198391 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:20:21 crc kubenswrapper[5113]: I0130 00:20:21.198658 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:20:21 crc kubenswrapper[5113]: I0130 00:20:21.199895 5113 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d8d25d4044f296bffb6b6885c2a6830e6967a1638a903216ca33f356c73951bf"} pod="openshift-machine-config-operator/machine-config-daemon-gxph5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 00:20:21 crc kubenswrapper[5113]: I0130 00:20:21.200122 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" containerID="cri-o://d8d25d4044f296bffb6b6885c2a6830e6967a1638a903216ca33f356c73951bf" gracePeriod=600 Jan 30 00:20:21 crc kubenswrapper[5113]: I0130 00:20:21.343875 5113 provider.go:93] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 00:20:21 crc kubenswrapper[5113]: I0130 00:20:21.849128 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx"] Jan 30 00:20:21 crc kubenswrapper[5113]: I0130 00:20:21.850132 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" podUID="27d4d422-313b-48d2-b7ec-7e914beaac62" containerName="kube-rbac-proxy" containerID="cri-o://84e0d23e9d709e289a86f7f346beec9de5fb61424d6a78b2237daae057d88a5d" gracePeriod=30 Jan 30 00:20:21 crc kubenswrapper[5113]: I0130 00:20:21.850356 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" podUID="27d4d422-313b-48d2-b7ec-7e914beaac62" containerName="ovnkube-cluster-manager" containerID="cri-o://ed1c2109516e4d4faa67a368e46ac6d8d5c3984826eac23b06abb147d7d05bd4" gracePeriod=30 Jan 30 00:20:22 
crc kubenswrapper[5113]: I0130 00:20:22.047331 5113 generic.go:358] "Generic (PLEG): container finished" podID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerID="d8d25d4044f296bffb6b6885c2a6830e6967a1638a903216ca33f356c73951bf" exitCode=0 Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.047403 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerDied","Data":"d8d25d4044f296bffb6b6885c2a6830e6967a1638a903216ca33f356c73951bf"} Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.047468 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerStarted","Data":"dbf3b234b28fc071e0fac78c4e03f35f6c4a815279840f68d3fec8f928bdd4c7"} Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.047496 5113 scope.go:117] "RemoveContainer" containerID="d2c027b2bad57e7da7ce244d6807304cf78d526f9fcc9b86a42e7865e2bae0e7" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.050688 5113 generic.go:358] "Generic (PLEG): container finished" podID="27d4d422-313b-48d2-b7ec-7e914beaac62" containerID="ed1c2109516e4d4faa67a368e46ac6d8d5c3984826eac23b06abb147d7d05bd4" exitCode=0 Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.050737 5113 generic.go:358] "Generic (PLEG): container finished" podID="27d4d422-313b-48d2-b7ec-7e914beaac62" containerID="84e0d23e9d709e289a86f7f346beec9de5fb61424d6a78b2237daae057d88a5d" exitCode=0 Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.050766 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" event={"ID":"27d4d422-313b-48d2-b7ec-7e914beaac62","Type":"ContainerDied","Data":"ed1c2109516e4d4faa67a368e46ac6d8d5c3984826eac23b06abb147d7d05bd4"} Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.050819 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" event={"ID":"27d4d422-313b-48d2-b7ec-7e914beaac62","Type":"ContainerDied","Data":"84e0d23e9d709e289a86f7f346beec9de5fb61424d6a78b2237daae057d88a5d"} Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.050831 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" event={"ID":"27d4d422-313b-48d2-b7ec-7e914beaac62","Type":"ContainerDied","Data":"97f0c491d44b4d346a618e810d275e9d2af83e61b47df67417a17065120b856e"} Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.050843 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97f0c491d44b4d346a618e810d275e9d2af83e61b47df67417a17065120b856e" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.075732 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-724qr"] Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.076438 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovn-controller" containerID="cri-o://7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a" gracePeriod=30 Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.076501 5113 kuberuntime_container.go:858] "Killing container with a grace period" 
pod="openshift-ovn-kubernetes/ovnkube-node-724qr" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166" gracePeriod=30 Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.076570 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="northd" containerID="cri-o://1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20" gracePeriod=30 Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.076653 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="kube-rbac-proxy-node" containerID="cri-o://1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6" gracePeriod=30 Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.076695 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovn-acl-logging" containerID="cri-o://b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c" gracePeriod=30 Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.076711 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="sbdb" containerID="cri-o://151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb" gracePeriod=30 Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.076791 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="nbdb" containerID="cri-o://07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07" gracePeriod=30 Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.108267 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovnkube-controller" containerID="cri-o://4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847" gracePeriod=30 Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.134850 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.219024 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr"] Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.220273 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="41ff0f59-4052-4862-aa77-23c8f170082b" containerName="oc" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.220293 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="41ff0f59-4052-4862-aa77-23c8f170082b" containerName="oc" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.220310 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="27d4d422-313b-48d2-b7ec-7e914beaac62" containerName="kube-rbac-proxy" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.220318 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="27d4d422-313b-48d2-b7ec-7e914beaac62" containerName="kube-rbac-proxy" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.220327 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="27d4d422-313b-48d2-b7ec-7e914beaac62" containerName="ovnkube-cluster-manager" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.220333 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="27d4d422-313b-48d2-b7ec-7e914beaac62" containerName="ovnkube-cluster-manager" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.220475 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="41ff0f59-4052-4862-aa77-23c8f170082b" containerName="oc" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.220491 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="27d4d422-313b-48d2-b7ec-7e914beaac62" containerName="kube-rbac-proxy" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.220502 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="27d4d422-313b-48d2-b7ec-7e914beaac62" containerName="ovnkube-cluster-manager" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.226003 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.267332 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-ovnkube-config\") pod \"27d4d422-313b-48d2-b7ec-7e914beaac62\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.267419 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-env-overrides\") pod \"27d4d422-313b-48d2-b7ec-7e914beaac62\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.267456 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6xzs\" (UniqueName: \"kubernetes.io/projected/27d4d422-313b-48d2-b7ec-7e914beaac62-kube-api-access-g6xzs\") pod \"27d4d422-313b-48d2-b7ec-7e914beaac62\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.267486 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/27d4d422-313b-48d2-b7ec-7e914beaac62-ovn-control-plane-metrics-cert\") pod \"27d4d422-313b-48d2-b7ec-7e914beaac62\" (UID: \"27d4d422-313b-48d2-b7ec-7e914beaac62\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.268937 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "27d4d422-313b-48d2-b7ec-7e914beaac62" (UID: "27d4d422-313b-48d2-b7ec-7e914beaac62"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.269470 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "27d4d422-313b-48d2-b7ec-7e914beaac62" (UID: "27d4d422-313b-48d2-b7ec-7e914beaac62"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.286057 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27d4d422-313b-48d2-b7ec-7e914beaac62-kube-api-access-g6xzs" (OuterVolumeSpecName: "kube-api-access-g6xzs") pod "27d4d422-313b-48d2-b7ec-7e914beaac62" (UID: "27d4d422-313b-48d2-b7ec-7e914beaac62"). InnerVolumeSpecName "kube-api-access-g6xzs". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.300730 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27d4d422-313b-48d2-b7ec-7e914beaac62-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "27d4d422-313b-48d2-b7ec-7e914beaac62" (UID: "27d4d422-313b-48d2-b7ec-7e914beaac62"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.369846 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrl5z\" (UniqueName: \"kubernetes.io/projected/2d403bd5-dd26-46c5-9590-8bc88b7988d1-kube-api-access-rrl5z\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.369940 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2d403bd5-dd26-46c5-9590-8bc88b7988d1-ovnkube-config\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.370079 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2d403bd5-dd26-46c5-9590-8bc88b7988d1-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.370132 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2d403bd5-dd26-46c5-9590-8bc88b7988d1-env-overrides\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.370202 5113 reconciler_common.go:299] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.370216 5113 reconciler_common.go:299] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/27d4d422-313b-48d2-b7ec-7e914beaac62-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.370226 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-g6xzs\" (UniqueName: \"kubernetes.io/projected/27d4d422-313b-48d2-b7ec-7e914beaac62-kube-api-access-g6xzs\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.370237 5113 reconciler_common.go:299] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/27d4d422-313b-48d2-b7ec-7e914beaac62-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.438503 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-724qr_6740364c-f52c-49d7-9841-823aa6f3894b/ovn-acl-logging/0.log" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.439562 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-724qr_6740364c-f52c-49d7-9841-823aa6f3894b/ovn-controller/0.log" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.440266 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.472014 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2d403bd5-dd26-46c5-9590-8bc88b7988d1-env-overrides\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.472147 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-rrl5z\" (UniqueName: \"kubernetes.io/projected/2d403bd5-dd26-46c5-9590-8bc88b7988d1-kube-api-access-rrl5z\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.472192 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2d403bd5-dd26-46c5-9590-8bc88b7988d1-ovnkube-config\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.472323 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2d403bd5-dd26-46c5-9590-8bc88b7988d1-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.473178 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2d403bd5-dd26-46c5-9590-8bc88b7988d1-env-overrides\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.474022 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2d403bd5-dd26-46c5-9590-8bc88b7988d1-ovnkube-config\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.487747 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2d403bd5-dd26-46c5-9590-8bc88b7988d1-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.507944 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrl5z\" (UniqueName: \"kubernetes.io/projected/2d403bd5-dd26-46c5-9590-8bc88b7988d1-kube-api-access-rrl5z\") pod \"ovnkube-control-plane-97c9b6c48-kk8rr\" (UID: \"2d403bd5-dd26-46c5-9590-8bc88b7988d1\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 
00:20:22.513040 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-77pcp"] Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513829 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="sbdb" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513855 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="sbdb" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513872 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="kube-rbac-proxy-node" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513878 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="kube-rbac-proxy-node" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513887 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="kube-rbac-proxy-ovn-metrics" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513896 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="kube-rbac-proxy-ovn-metrics" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513912 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovnkube-controller" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513920 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovnkube-controller" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513933 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="nbdb" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513940 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="nbdb" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513959 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovn-controller" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513966 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovn-controller" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513981 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="northd" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513988 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="northd" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.513998 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="kubecfg-setup" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.514020 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="kubecfg-setup" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.514033 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" 
podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovn-acl-logging" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.514040 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovn-acl-logging" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.514165 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="nbdb" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.514182 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="northd" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.514195 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovnkube-controller" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.514205 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovn-controller" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.514214 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="kube-rbac-proxy-node" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.514223 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="kube-rbac-proxy-ovn-metrics" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.514233 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="sbdb" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.514241 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" containerName="ovn-acl-logging" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.519763 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.548127 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.573209 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-netd\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.573263 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-bin\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.573332 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-openvswitch\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.573366 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.573412 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.573464 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-script-lib\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.573511 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6740364c-f52c-49d7-9841-823aa6f3894b-ovn-node-metrics-cert\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.573508 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574318 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-ovn-kubernetes\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574355 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574373 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-slash\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574400 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-etc-openvswitch\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574443 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-env-overrides\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574447 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-slash" (OuterVolumeSpecName: "host-slash") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574461 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574469 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-ovn\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574485 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). 
InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574491 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574614 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hczl\" (UniqueName: \"kubernetes.io/projected/6740364c-f52c-49d7-9841-823aa6f3894b-kube-api-access-2hczl\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574894 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-var-lib-openvswitch\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574925 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.574988 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575003 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-systemd-units\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575032 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-systemd\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575052 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575061 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-node-log\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575089 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-node-log" (OuterVolumeSpecName: "node-log") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575158 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-log-socket\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575244 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-config\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575244 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-log-socket" (OuterVolumeSpecName: "log-socket") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575273 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-kubelet\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575313 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575336 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-netns\") pod \"6740364c-f52c-49d7-9841-823aa6f3894b\" (UID: \"6740364c-f52c-49d7-9841-823aa6f3894b\") " Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575351 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "host-kubelet". 
PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575449 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575445 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575719 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575917 5113 reconciler_common.go:299] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575940 5113 reconciler_common.go:299] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575954 5113 reconciler_common.go:299] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575968 5113 reconciler_common.go:299] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575980 5113 reconciler_common.go:299] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.575992 5113 reconciler_common.go:299] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-node-log\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.576004 5113 reconciler_common.go:299] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-log-socket\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.576016 5113 reconciler_common.go:299] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-config\") on node 
\"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.576028 5113 reconciler_common.go:299] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.576041 5113 reconciler_common.go:299] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.576057 5113 reconciler_common.go:299] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.576070 5113 reconciler_common.go:299] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.576083 5113 reconciler_common.go:299] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.576094 5113 reconciler_common.go:299] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.576106 5113 reconciler_common.go:299] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6740364c-f52c-49d7-9841-823aa6f3894b-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.576119 5113 reconciler_common.go:299] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.576131 5113 reconciler_common.go:299] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-host-slash\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.579578 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6740364c-f52c-49d7-9841-823aa6f3894b-kube-api-access-2hczl" (OuterVolumeSpecName: "kube-api-access-2hczl") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "kube-api-access-2hczl". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.580274 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6740364c-f52c-49d7-9841-823aa6f3894b-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.590647 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "6740364c-f52c-49d7-9841-823aa6f3894b" (UID: "6740364c-f52c-49d7-9841-823aa6f3894b"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGIDValue "" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.677711 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-cni-netd\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.677841 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-kubelet\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.677908 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/738c187f-4b02-490f-86e7-e24c00e7700c-env-overrides\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.677982 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-run-systemd\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678360 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-node-log\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678412 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/738c187f-4b02-490f-86e7-e24c00e7700c-ovnkube-script-lib\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678442 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/738c187f-4b02-490f-86e7-e24c00e7700c-ovn-node-metrics-cert\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678465 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-run-openvswitch\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678494 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-run-ovn\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678638 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-etc-openvswitch\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678755 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/738c187f-4b02-490f-86e7-e24c00e7700c-ovnkube-config\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678804 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-systemd-units\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678828 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-var-lib-openvswitch\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678853 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-run-ovn-kubernetes\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678888 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678969 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-log-socket\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.678996 
5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-cni-bin\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.679028 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcqs5\" (UniqueName: \"kubernetes.io/projected/738c187f-4b02-490f-86e7-e24c00e7700c-kube-api-access-tcqs5\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.679059 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-slash\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.679086 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-run-netns\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.679163 5113 reconciler_common.go:299] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6740364c-f52c-49d7-9841-823aa6f3894b-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.679183 5113 reconciler_common.go:299] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6740364c-f52c-49d7-9841-823aa6f3894b-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.679199 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-2hczl\" (UniqueName: \"kubernetes.io/projected/6740364c-f52c-49d7-9841-823aa6f3894b-kube-api-access-2hczl\") on node \"crc\" DevicePath \"\"" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.779886 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-run-openvswitch\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780023 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-run-ovn\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.779983 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-run-openvswitch\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 
00:20:22.780052 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-etc-openvswitch\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780131 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-etc-openvswitch\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780189 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-run-ovn\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780248 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/738c187f-4b02-490f-86e7-e24c00e7700c-ovnkube-config\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780385 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-systemd-units\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780420 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-var-lib-openvswitch\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780451 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-run-ovn-kubernetes\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780480 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-systemd-units\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780490 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780514 5113 operation_generator.go:615] 
"MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-var-lib-openvswitch\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780555 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780568 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-run-ovn-kubernetes\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780622 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-log-socket\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780640 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-cni-bin\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780672 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-tcqs5\" (UniqueName: \"kubernetes.io/projected/738c187f-4b02-490f-86e7-e24c00e7700c-kube-api-access-tcqs5\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780689 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-log-socket\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780706 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-slash\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780725 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-run-netns\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780729 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" 
(UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-cni-bin\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780765 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-slash\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780777 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-cni-netd\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780837 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-kubelet\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780918 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/738c187f-4b02-490f-86e7-e24c00e7700c-env-overrides\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780945 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-run-systemd\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.780977 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-node-log\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.781014 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/738c187f-4b02-490f-86e7-e24c00e7700c-ovnkube-script-lib\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.781034 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/738c187f-4b02-490f-86e7-e24c00e7700c-ovn-node-metrics-cert\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.781183 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/738c187f-4b02-490f-86e7-e24c00e7700c-ovnkube-config\") pod \"ovnkube-node-77pcp\" (UID: 
\"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.781300 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-node-log\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.781333 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-run-systemd\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.781372 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-cni-netd\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.781565 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-run-netns\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.781809 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/738c187f-4b02-490f-86e7-e24c00e7700c-env-overrides\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.781874 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/738c187f-4b02-490f-86e7-e24c00e7700c-host-kubelet\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.782168 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/738c187f-4b02-490f-86e7-e24c00e7700c-ovnkube-script-lib\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.789508 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/738c187f-4b02-490f-86e7-e24c00e7700c-ovn-node-metrics-cert\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.798779 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcqs5\" (UniqueName: \"kubernetes.io/projected/738c187f-4b02-490f-86e7-e24c00e7700c-kube-api-access-tcqs5\") pod \"ovnkube-node-77pcp\" (UID: \"738c187f-4b02-490f-86e7-e24c00e7700c\") " pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:22 crc kubenswrapper[5113]: I0130 00:20:22.844988 5113 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.063145 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mbd62_8ad95d7b-7c01-4672-8614-0cc8e52c0d79/kube-multus/0.log" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.063219 5113 generic.go:358] "Generic (PLEG): container finished" podID="8ad95d7b-7c01-4672-8614-0cc8e52c0d79" containerID="de200dfacd4aa9bc81f1ab585e923e29b0513c4c2d438d2206ba2325a4b9faab" exitCode=2 Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.063341 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mbd62" event={"ID":"8ad95d7b-7c01-4672-8614-0cc8e52c0d79","Type":"ContainerDied","Data":"de200dfacd4aa9bc81f1ab585e923e29b0513c4c2d438d2206ba2325a4b9faab"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.064331 5113 scope.go:117] "RemoveContainer" containerID="de200dfacd4aa9bc81f1ab585e923e29b0513c4c2d438d2206ba2325a4b9faab" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.071058 5113 generic.go:358] "Generic (PLEG): container finished" podID="738c187f-4b02-490f-86e7-e24c00e7700c" containerID="c882aca5bc033d2f23886ee69dea9ed2658599916749cde02e5b6281c50a3f65" exitCode=0 Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.071125 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" event={"ID":"738c187f-4b02-490f-86e7-e24c00e7700c","Type":"ContainerDied","Data":"c882aca5bc033d2f23886ee69dea9ed2658599916749cde02e5b6281c50a3f65"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.071182 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" event={"ID":"738c187f-4b02-490f-86e7-e24c00e7700c","Type":"ContainerStarted","Data":"30cc8d669d9f33bb977c6bdae81f629b752b751a59dae40ab470cbc3ae38629f"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.077725 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-724qr_6740364c-f52c-49d7-9841-823aa6f3894b/ovn-acl-logging/0.log" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078202 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-724qr_6740364c-f52c-49d7-9841-823aa6f3894b/ovn-controller/0.log" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078637 5113 generic.go:358] "Generic (PLEG): container finished" podID="6740364c-f52c-49d7-9841-823aa6f3894b" containerID="4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847" exitCode=0 Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078661 5113 generic.go:358] "Generic (PLEG): container finished" podID="6740364c-f52c-49d7-9841-823aa6f3894b" containerID="151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb" exitCode=0 Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078673 5113 generic.go:358] "Generic (PLEG): container finished" podID="6740364c-f52c-49d7-9841-823aa6f3894b" containerID="07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07" exitCode=0 Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078680 5113 generic.go:358] "Generic (PLEG): container finished" podID="6740364c-f52c-49d7-9841-823aa6f3894b" containerID="1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20" exitCode=0 Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078712 5113 generic.go:358] "Generic (PLEG): 
container finished" podID="6740364c-f52c-49d7-9841-823aa6f3894b" containerID="fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166" exitCode=0 Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078722 5113 generic.go:358] "Generic (PLEG): container finished" podID="6740364c-f52c-49d7-9841-823aa6f3894b" containerID="1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6" exitCode=0 Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078733 5113 generic.go:358] "Generic (PLEG): container finished" podID="6740364c-f52c-49d7-9841-823aa6f3894b" containerID="b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c" exitCode=143 Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078742 5113 generic.go:358] "Generic (PLEG): container finished" podID="6740364c-f52c-49d7-9841-823aa6f3894b" containerID="7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a" exitCode=143 Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078760 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerDied","Data":"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078844 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerDied","Data":"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078862 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerDied","Data":"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078874 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerDied","Data":"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078888 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerDied","Data":"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078901 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerDied","Data":"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078904 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078916 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078934 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078942 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078953 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerDied","Data":"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078966 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078975 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078972 5113 scope.go:117] "RemoveContainer" containerID="4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.078982 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079129 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079138 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079145 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079152 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079159 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079166 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079179 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerDied","Data":"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079193 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079204 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079212 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079219 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079226 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079233 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079240 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079246 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079253 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079263 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-724qr" event={"ID":"6740364c-f52c-49d7-9841-823aa6f3894b","Type":"ContainerDied","Data":"d10209b0c9210bbafea1e5f0426d9e2ccd7ec64ba9f477fdd8b59dbc8f4a1ae1"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079278 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079287 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079293 5113 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079301 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079308 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079315 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079322 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079328 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.079334 5113 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.088924 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.088961 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" event={"ID":"2d403bd5-dd26-46c5-9590-8bc88b7988d1","Type":"ContainerStarted","Data":"3ed11f505a03a0851312442ce27bb56cb69f05f4fc16b86d58df8b2ea77840d0"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.089039 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" event={"ID":"2d403bd5-dd26-46c5-9590-8bc88b7988d1","Type":"ContainerStarted","Data":"e570aa134d3b506dab9ca3facf4dd94c6107cbb59ebce25027a0348bac98c7b5"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.089050 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" event={"ID":"2d403bd5-dd26-46c5-9590-8bc88b7988d1","Type":"ContainerStarted","Data":"9b071ad295938ca006a646143aae56b9e4d10523ad960555ca0a83cce1095049"} Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.100515 5113 scope.go:117] "RemoveContainer" containerID="151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.132647 5113 scope.go:117] "RemoveContainer" containerID="07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.148969 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx"] Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.153075 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-57b78d8988-trlrx"] Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.167073 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-724qr"] Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.171314 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-724qr"] Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.184137 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-97c9b6c48-kk8rr" podStartSLOduration=2.184107774 podStartE2EDuration="2.184107774s" podCreationTimestamp="2026-01-30 00:20:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:20:23.182193314 +0000 UTC m=+643.254798701" watchObservedRunningTime="2026-01-30 00:20:23.184107774 +0000 UTC m=+643.256713151" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.196852 5113 scope.go:117] "RemoveContainer" containerID="1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.227181 5113 scope.go:117] "RemoveContainer" containerID="fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.246599 5113 scope.go:117] "RemoveContainer" containerID="1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.262490 5113 scope.go:117] "RemoveContainer" containerID="b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 
00:20:23.290606 5113 scope.go:117] "RemoveContainer" containerID="7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.310202 5113 scope.go:117] "RemoveContainer" containerID="3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.326306 5113 scope.go:117] "RemoveContainer" containerID="4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847" Jan 30 00:20:23 crc kubenswrapper[5113]: E0130 00:20:23.326823 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847\": container with ID starting with 4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847 not found: ID does not exist" containerID="4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.326871 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847"} err="failed to get container status \"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847\": rpc error: code = NotFound desc = could not find container \"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847\": container with ID starting with 4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.326901 5113 scope.go:117] "RemoveContainer" containerID="151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb" Jan 30 00:20:23 crc kubenswrapper[5113]: E0130 00:20:23.327280 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb\": container with ID starting with 151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb not found: ID does not exist" containerID="151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.327322 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb"} err="failed to get container status \"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb\": rpc error: code = NotFound desc = could not find container \"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb\": container with ID starting with 151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.327348 5113 scope.go:117] "RemoveContainer" containerID="07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07" Jan 30 00:20:23 crc kubenswrapper[5113]: E0130 00:20:23.327649 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07\": container with ID starting with 07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07 not found: ID does not exist" containerID="07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.327681 5113 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07"} err="failed to get container status \"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07\": rpc error: code = NotFound desc = could not find container \"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07\": container with ID starting with 07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.327722 5113 scope.go:117] "RemoveContainer" containerID="1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20" Jan 30 00:20:23 crc kubenswrapper[5113]: E0130 00:20:23.328243 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20\": container with ID starting with 1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20 not found: ID does not exist" containerID="1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.328267 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20"} err="failed to get container status \"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20\": rpc error: code = NotFound desc = could not find container \"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20\": container with ID starting with 1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.328283 5113 scope.go:117] "RemoveContainer" containerID="fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166" Jan 30 00:20:23 crc kubenswrapper[5113]: E0130 00:20:23.328573 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166\": container with ID starting with fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166 not found: ID does not exist" containerID="fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.328597 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166"} err="failed to get container status \"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166\": rpc error: code = NotFound desc = could not find container \"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166\": container with ID starting with fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.328612 5113 scope.go:117] "RemoveContainer" containerID="1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6" Jan 30 00:20:23 crc kubenswrapper[5113]: E0130 00:20:23.328891 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6\": container with ID starting with 1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6 not found: ID does not exist" 
containerID="1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.328932 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6"} err="failed to get container status \"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6\": rpc error: code = NotFound desc = could not find container \"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6\": container with ID starting with 1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.328960 5113 scope.go:117] "RemoveContainer" containerID="b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c" Jan 30 00:20:23 crc kubenswrapper[5113]: E0130 00:20:23.329182 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c\": container with ID starting with b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c not found: ID does not exist" containerID="b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.329220 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c"} err="failed to get container status \"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c\": rpc error: code = NotFound desc = could not find container \"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c\": container with ID starting with b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.329243 5113 scope.go:117] "RemoveContainer" containerID="7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a" Jan 30 00:20:23 crc kubenswrapper[5113]: E0130 00:20:23.329595 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a\": container with ID starting with 7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a not found: ID does not exist" containerID="7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.329621 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a"} err="failed to get container status \"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a\": rpc error: code = NotFound desc = could not find container \"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a\": container with ID starting with 7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.329640 5113 scope.go:117] "RemoveContainer" containerID="3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7" Jan 30 00:20:23 crc kubenswrapper[5113]: E0130 00:20:23.329873 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7\": container with ID starting with 3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7 not found: ID does not exist" containerID="3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.329899 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7"} err="failed to get container status \"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7\": rpc error: code = NotFound desc = could not find container \"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7\": container with ID starting with 3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.329914 5113 scope.go:117] "RemoveContainer" containerID="4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.330092 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847"} err="failed to get container status \"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847\": rpc error: code = NotFound desc = could not find container \"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847\": container with ID starting with 4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.330111 5113 scope.go:117] "RemoveContainer" containerID="151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.330391 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb"} err="failed to get container status \"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb\": rpc error: code = NotFound desc = could not find container \"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb\": container with ID starting with 151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.330413 5113 scope.go:117] "RemoveContainer" containerID="07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.330863 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07"} err="failed to get container status \"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07\": rpc error: code = NotFound desc = could not find container \"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07\": container with ID starting with 07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.330888 5113 scope.go:117] "RemoveContainer" containerID="1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.331115 5113 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20"} err="failed to get container status \"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20\": rpc error: code = NotFound desc = could not find container \"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20\": container with ID starting with 1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.331137 5113 scope.go:117] "RemoveContainer" containerID="fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.331346 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166"} err="failed to get container status \"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166\": rpc error: code = NotFound desc = could not find container \"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166\": container with ID starting with fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.331365 5113 scope.go:117] "RemoveContainer" containerID="1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.331556 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6"} err="failed to get container status \"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6\": rpc error: code = NotFound desc = could not find container \"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6\": container with ID starting with 1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.331575 5113 scope.go:117] "RemoveContainer" containerID="b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.331757 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c"} err="failed to get container status \"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c\": rpc error: code = NotFound desc = could not find container \"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c\": container with ID starting with b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.331816 5113 scope.go:117] "RemoveContainer" containerID="7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.332056 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a"} err="failed to get container status \"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a\": rpc error: code = NotFound desc = could not find container \"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a\": container with ID starting with 7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a not found: ID does not exist" Jan 
30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.332077 5113 scope.go:117] "RemoveContainer" containerID="3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.332325 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7"} err="failed to get container status \"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7\": rpc error: code = NotFound desc = could not find container \"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7\": container with ID starting with 3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.332354 5113 scope.go:117] "RemoveContainer" containerID="4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.332584 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847"} err="failed to get container status \"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847\": rpc error: code = NotFound desc = could not find container \"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847\": container with ID starting with 4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.332606 5113 scope.go:117] "RemoveContainer" containerID="151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.332946 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb"} err="failed to get container status \"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb\": rpc error: code = NotFound desc = could not find container \"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb\": container with ID starting with 151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.332994 5113 scope.go:117] "RemoveContainer" containerID="07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.333301 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07"} err="failed to get container status \"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07\": rpc error: code = NotFound desc = could not find container \"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07\": container with ID starting with 07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.333323 5113 scope.go:117] "RemoveContainer" containerID="1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.333565 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20"} err="failed to get container status 
\"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20\": rpc error: code = NotFound desc = could not find container \"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20\": container with ID starting with 1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.333594 5113 scope.go:117] "RemoveContainer" containerID="fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.333855 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166"} err="failed to get container status \"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166\": rpc error: code = NotFound desc = could not find container \"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166\": container with ID starting with fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.333880 5113 scope.go:117] "RemoveContainer" containerID="1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.334286 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6"} err="failed to get container status \"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6\": rpc error: code = NotFound desc = could not find container \"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6\": container with ID starting with 1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.334309 5113 scope.go:117] "RemoveContainer" containerID="b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.334631 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c"} err="failed to get container status \"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c\": rpc error: code = NotFound desc = could not find container \"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c\": container with ID starting with b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.334654 5113 scope.go:117] "RemoveContainer" containerID="7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.334873 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a"} err="failed to get container status \"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a\": rpc error: code = NotFound desc = could not find container \"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a\": container with ID starting with 7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.334896 5113 scope.go:117] "RemoveContainer" 
containerID="3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.335238 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7"} err="failed to get container status \"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7\": rpc error: code = NotFound desc = could not find container \"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7\": container with ID starting with 3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.335265 5113 scope.go:117] "RemoveContainer" containerID="4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.335566 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847"} err="failed to get container status \"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847\": rpc error: code = NotFound desc = could not find container \"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847\": container with ID starting with 4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.335612 5113 scope.go:117] "RemoveContainer" containerID="151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.335921 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb"} err="failed to get container status \"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb\": rpc error: code = NotFound desc = could not find container \"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb\": container with ID starting with 151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.335952 5113 scope.go:117] "RemoveContainer" containerID="07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.336272 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07"} err="failed to get container status \"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07\": rpc error: code = NotFound desc = could not find container \"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07\": container with ID starting with 07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.336296 5113 scope.go:117] "RemoveContainer" containerID="1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.336612 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20"} err="failed to get container status \"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20\": rpc error: code = NotFound desc = could not find 
container \"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20\": container with ID starting with 1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.336633 5113 scope.go:117] "RemoveContainer" containerID="fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.336928 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166"} err="failed to get container status \"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166\": rpc error: code = NotFound desc = could not find container \"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166\": container with ID starting with fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.336949 5113 scope.go:117] "RemoveContainer" containerID="1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.337181 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6"} err="failed to get container status \"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6\": rpc error: code = NotFound desc = could not find container \"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6\": container with ID starting with 1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.337203 5113 scope.go:117] "RemoveContainer" containerID="b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.337539 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c"} err="failed to get container status \"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c\": rpc error: code = NotFound desc = could not find container \"b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c\": container with ID starting with b8cc20b12ae83e6ef819cf3d08f4d2353012bc7a1055eea81c57e433f294d44c not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.337563 5113 scope.go:117] "RemoveContainer" containerID="7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.337830 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a"} err="failed to get container status \"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a\": rpc error: code = NotFound desc = could not find container \"7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a\": container with ID starting with 7178248850ec1c1669c35f6b558b3f8151c86b7519f65764eba011a550b6645a not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.337852 5113 scope.go:117] "RemoveContainer" containerID="3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.338108 5113 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7"} err="failed to get container status \"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7\": rpc error: code = NotFound desc = could not find container \"3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7\": container with ID starting with 3957c232e65d01fd8e7cd081af8b0bb983a0d22e2cca1a2d60d7d4857be19db7 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.338137 5113 scope.go:117] "RemoveContainer" containerID="4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.338378 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847"} err="failed to get container status \"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847\": rpc error: code = NotFound desc = could not find container \"4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847\": container with ID starting with 4cba0d6e1014524374adff174fd5fce7b04807f9db3c5bdd611c23e158be0847 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.338397 5113 scope.go:117] "RemoveContainer" containerID="151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.338701 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb"} err="failed to get container status \"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb\": rpc error: code = NotFound desc = could not find container \"151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb\": container with ID starting with 151ae17c7b70658aa6f73b03ad38e51e5490b20c9a3aa5fdb058d66d84b2f0bb not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.338722 5113 scope.go:117] "RemoveContainer" containerID="07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.338945 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07"} err="failed to get container status \"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07\": rpc error: code = NotFound desc = could not find container \"07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07\": container with ID starting with 07f409a90feb4da9241f598d370166c927d244c693f6e3194cac05fdceaa8c07 not found: ID does not exist" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.338973 5113 scope.go:117] "RemoveContainer" containerID="1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20" Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.339244 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20"} err="failed to get container status \"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20\": rpc error: code = NotFound desc = could not find container \"1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20\": container with ID starting with 
1b0c09e2ee6dbf93886c8318bd38c7b9857cc238eab511a17cc3e454a6a1fb20 not found: ID does not exist"
Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.339269 5113 scope.go:117] "RemoveContainer" containerID="fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166"
Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.339511 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166"} err="failed to get container status \"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166\": rpc error: code = NotFound desc = could not find container \"fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166\": container with ID starting with fe267dfd3832f714aebbfbebfd4b880457e0e6628d0dc0f5b6397cb905a51166 not found: ID does not exist"
Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.339550 5113 scope.go:117] "RemoveContainer" containerID="1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6"
Jan 30 00:20:23 crc kubenswrapper[5113]: I0130 00:20:23.339845 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6"} err="failed to get container status \"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6\": rpc error: code = NotFound desc = could not find container \"1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6\": container with ID starting with 1fbb534e146adf7c22b51c2c0fc084eb5b555abae0bc77b88d50c2f8d01576c6 not found: ID does not exist"
Jan 30 00:20:24 crc kubenswrapper[5113]: I0130 00:20:24.100325 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mbd62_8ad95d7b-7c01-4672-8614-0cc8e52c0d79/kube-multus/0.log"
Jan 30 00:20:24 crc kubenswrapper[5113]: I0130 00:20:24.101194 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-mbd62" event={"ID":"8ad95d7b-7c01-4672-8614-0cc8e52c0d79","Type":"ContainerStarted","Data":"28038b0eaa757845b0d41e890f21b6f45125fe49dcdbd6216f5c01efa31701ab"}
Jan 30 00:20:24 crc kubenswrapper[5113]: I0130 00:20:24.108068 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" event={"ID":"738c187f-4b02-490f-86e7-e24c00e7700c","Type":"ContainerStarted","Data":"edcf67dd30a561882fc0a3c1dad48729d99e278007a560509c0c94268edb6ed9"}
Jan 30 00:20:24 crc kubenswrapper[5113]: I0130 00:20:24.108119 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" event={"ID":"738c187f-4b02-490f-86e7-e24c00e7700c","Type":"ContainerStarted","Data":"81fa78ec82bc60dc1d02810f5b77f6b305290954859de8ee1c1ea7212637e491"}
Jan 30 00:20:24 crc kubenswrapper[5113]: I0130 00:20:24.108135 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" event={"ID":"738c187f-4b02-490f-86e7-e24c00e7700c","Type":"ContainerStarted","Data":"80e0886a992f8c303261cb86bf90760df26f7fd51e709d58466a3734e817cc16"}
Jan 30 00:20:24 crc kubenswrapper[5113]: I0130 00:20:24.108145 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" event={"ID":"738c187f-4b02-490f-86e7-e24c00e7700c","Type":"ContainerStarted","Data":"948bb1b1734efcdd7edb260161a72c03c95cc8ceea2d4e07a325847e1f6f4113"}
Jan 30 00:20:24 crc kubenswrapper[5113]: I0130 00:20:24.108158 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" event={"ID":"738c187f-4b02-490f-86e7-e24c00e7700c","Type":"ContainerStarted","Data":"42cac3b88462406879d5b2391b235cd30581c8280f28932d2abf8841bb8e1f9d"}
Jan 30 00:20:24 crc kubenswrapper[5113]: I0130 00:20:24.108167 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" event={"ID":"738c187f-4b02-490f-86e7-e24c00e7700c","Type":"ContainerStarted","Data":"b66298d8d7dc60b3f4fc557d274c5b4f5257a59afda6ad4853ae05d662191242"}
Jan 30 00:20:24 crc kubenswrapper[5113]: I0130 00:20:24.785486 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27d4d422-313b-48d2-b7ec-7e914beaac62" path="/var/lib/kubelet/pods/27d4d422-313b-48d2-b7ec-7e914beaac62/volumes"
Jan 30 00:20:24 crc kubenswrapper[5113]: I0130 00:20:24.787061 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6740364c-f52c-49d7-9841-823aa6f3894b" path="/var/lib/kubelet/pods/6740364c-f52c-49d7-9841-823aa6f3894b/volumes"
Jan 30 00:20:27 crc kubenswrapper[5113]: I0130 00:20:27.140706 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" event={"ID":"738c187f-4b02-490f-86e7-e24c00e7700c","Type":"ContainerStarted","Data":"fad0ce84915b74ec5e6d25ba4c9b92e8c6cc64ab974e93f5ffa01bba92711891"}
Jan 30 00:20:29 crc kubenswrapper[5113]: I0130 00:20:29.160432 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" event={"ID":"738c187f-4b02-490f-86e7-e24c00e7700c","Type":"ContainerStarted","Data":"55c860c4d3bdf2b6e77c0ea9967cf517a39e897e8352ceb554203b5329efb715"}
Jan 30 00:20:29 crc kubenswrapper[5113]: I0130 00:20:29.161583 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp"
Jan 30 00:20:29 crc kubenswrapper[5113]: I0130 00:20:29.161600 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp"
Jan 30 00:20:29 crc kubenswrapper[5113]: I0130 00:20:29.194416 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp"
Jan 30 00:20:29 crc kubenswrapper[5113]: I0130 00:20:29.196435 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp" podStartSLOduration=7.196412198 podStartE2EDuration="7.196412198s" podCreationTimestamp="2026-01-30 00:20:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:20:29.19191123 +0000 UTC m=+649.264516607" watchObservedRunningTime="2026-01-30 00:20:29.196412198 +0000 UTC m=+649.269017575"
Jan 30 00:20:30 crc kubenswrapper[5113]: I0130 00:20:30.170973 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp"
Jan 30 00:20:30 crc kubenswrapper[5113]: I0130 00:20:30.224348 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp"
Jan 30 00:20:41 crc kubenswrapper[5113]: I0130 00:20:41.885431 5113 scope.go:117] "RemoveContainer" containerID="ed1c2109516e4d4faa67a368e46ac6d8d5c3984826eac23b06abb147d7d05bd4"
Jan 30 00:20:41 crc kubenswrapper[5113]: I0130 00:20:41.909038 5113 scope.go:117] "RemoveContainer" containerID="84e0d23e9d709e289a86f7f346beec9de5fb61424d6a78b2237daae057d88a5d"
Jan 30 00:21:02 crc kubenswrapper[5113]: I0130 00:21:02.214063 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-77pcp"
Jan 30 00:21:22 crc kubenswrapper[5113]: I0130 00:21:22.796571 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zw2z8"]
Jan 30 00:21:22 crc kubenswrapper[5113]: I0130 00:21:22.800904 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-zw2z8" podUID="909f168f-fd5f-4b75-909e-514deaea8397" containerName="registry-server" containerID="cri-o://0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b" gracePeriod=30
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.203767 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zw2z8"
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.291588 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-catalog-content\") pod \"909f168f-fd5f-4b75-909e-514deaea8397\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") "
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.291885 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-utilities\") pod \"909f168f-fd5f-4b75-909e-514deaea8397\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") "
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.291949 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x85hv\" (UniqueName: \"kubernetes.io/projected/909f168f-fd5f-4b75-909e-514deaea8397-kube-api-access-x85hv\") pod \"909f168f-fd5f-4b75-909e-514deaea8397\" (UID: \"909f168f-fd5f-4b75-909e-514deaea8397\") "
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.293283 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-utilities" (OuterVolumeSpecName: "utilities") pod "909f168f-fd5f-4b75-909e-514deaea8397" (UID: "909f168f-fd5f-4b75-909e-514deaea8397"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.300645 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/909f168f-fd5f-4b75-909e-514deaea8397-kube-api-access-x85hv" (OuterVolumeSpecName: "kube-api-access-x85hv") pod "909f168f-fd5f-4b75-909e-514deaea8397" (UID: "909f168f-fd5f-4b75-909e-514deaea8397"). InnerVolumeSpecName "kube-api-access-x85hv". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.307378 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "909f168f-fd5f-4b75-909e-514deaea8397" (UID: "909f168f-fd5f-4b75-909e-514deaea8397"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.393348 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-utilities\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.393402 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-x85hv\" (UniqueName: \"kubernetes.io/projected/909f168f-fd5f-4b75-909e-514deaea8397-kube-api-access-x85hv\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.393427 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/909f168f-fd5f-4b75-909e-514deaea8397-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.566771 5113 generic.go:358] "Generic (PLEG): container finished" podID="909f168f-fd5f-4b75-909e-514deaea8397" containerID="0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b" exitCode=0
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.566887 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-zw2z8"
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.566901 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zw2z8" event={"ID":"909f168f-fd5f-4b75-909e-514deaea8397","Type":"ContainerDied","Data":"0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b"}
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.566988 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-zw2z8" event={"ID":"909f168f-fd5f-4b75-909e-514deaea8397","Type":"ContainerDied","Data":"ffe622ccfa50ed7336982623e30860d3a5207b66a0805dcec17a7a5807eced95"}
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.567031 5113 scope.go:117] "RemoveContainer" containerID="0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b"
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.591933 5113 scope.go:117] "RemoveContainer" containerID="a026ac1a933f3fb229679c65180ee62adaedf95cb8ea135ade935daa2ca9cc87"
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.603667 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-zw2z8"]
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.609894 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-zw2z8"]
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.629249 5113 scope.go:117] "RemoveContainer" containerID="2444e876087b90e83211dfbe36dfd8459e698b792af3b6e0698d71e740b5560b"
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.660337 5113 scope.go:117] "RemoveContainer" containerID="0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b"
Jan 30 00:21:23 crc kubenswrapper[5113]: E0130 00:21:23.664688 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b\": container with ID starting with 0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b not found: ID does not exist" containerID="0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b"
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.664752 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b"} err="failed to get container status \"0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b\": rpc error: code = NotFound desc = could not find container \"0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b\": container with ID starting with 0d5b0bfd222e4a49c467c7c5f12dcde65bd95681f1583653d117ba745ebaf67b not found: ID does not exist"
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.664794 5113 scope.go:117] "RemoveContainer" containerID="a026ac1a933f3fb229679c65180ee62adaedf95cb8ea135ade935daa2ca9cc87"
Jan 30 00:21:23 crc kubenswrapper[5113]: E0130 00:21:23.666944 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a026ac1a933f3fb229679c65180ee62adaedf95cb8ea135ade935daa2ca9cc87\": container with ID starting with a026ac1a933f3fb229679c65180ee62adaedf95cb8ea135ade935daa2ca9cc87 not found: ID does not exist" containerID="a026ac1a933f3fb229679c65180ee62adaedf95cb8ea135ade935daa2ca9cc87"
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.667039 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a026ac1a933f3fb229679c65180ee62adaedf95cb8ea135ade935daa2ca9cc87"} err="failed to get container status \"a026ac1a933f3fb229679c65180ee62adaedf95cb8ea135ade935daa2ca9cc87\": rpc error: code = NotFound desc = could not find container \"a026ac1a933f3fb229679c65180ee62adaedf95cb8ea135ade935daa2ca9cc87\": container with ID starting with a026ac1a933f3fb229679c65180ee62adaedf95cb8ea135ade935daa2ca9cc87 not found: ID does not exist"
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.667104 5113 scope.go:117] "RemoveContainer" containerID="2444e876087b90e83211dfbe36dfd8459e698b792af3b6e0698d71e740b5560b"
Jan 30 00:21:23 crc kubenswrapper[5113]: E0130 00:21:23.667587 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2444e876087b90e83211dfbe36dfd8459e698b792af3b6e0698d71e740b5560b\": container with ID starting with 2444e876087b90e83211dfbe36dfd8459e698b792af3b6e0698d71e740b5560b not found: ID does not exist" containerID="2444e876087b90e83211dfbe36dfd8459e698b792af3b6e0698d71e740b5560b"
Jan 30 00:21:23 crc kubenswrapper[5113]: I0130 00:21:23.667627 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2444e876087b90e83211dfbe36dfd8459e698b792af3b6e0698d71e740b5560b"} err="failed to get container status \"2444e876087b90e83211dfbe36dfd8459e698b792af3b6e0698d71e740b5560b\": rpc error: code = NotFound desc = could not find container \"2444e876087b90e83211dfbe36dfd8459e698b792af3b6e0698d71e740b5560b\": container with ID starting with 2444e876087b90e83211dfbe36dfd8459e698b792af3b6e0698d71e740b5560b not found: ID does not exist"
Jan 30 00:21:24 crc kubenswrapper[5113]: I0130 00:21:24.789179 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="909f168f-fd5f-4b75-909e-514deaea8397" path="/var/lib/kubelet/pods/909f168f-fd5f-4b75-909e-514deaea8397/volumes"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.366720 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"]
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.367611 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="909f168f-fd5f-4b75-909e-514deaea8397" containerName="extract-content"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.367639 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="909f168f-fd5f-4b75-909e-514deaea8397" containerName="extract-content"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.367655 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="909f168f-fd5f-4b75-909e-514deaea8397" containerName="extract-utilities"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.367665 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="909f168f-fd5f-4b75-909e-514deaea8397" containerName="extract-utilities"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.367681 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="909f168f-fd5f-4b75-909e-514deaea8397" containerName="registry-server"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.367690 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="909f168f-fd5f-4b75-909e-514deaea8397" containerName="registry-server"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.367862 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="909f168f-fd5f-4b75-909e-514deaea8397" containerName="registry-server"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.382442 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"]
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.382734 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.385320 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"default-dockercfg-b2ccr\""
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.436691 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.436772 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.436909 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s595d\" (UniqueName: \"kubernetes.io/projected/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-kube-api-access-s595d\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.538169 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-s595d\" (UniqueName: \"kubernetes.io/projected/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-kube-api-access-s595d\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.538262 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.538311 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.538877 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.538908 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.558878 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-s595d\" (UniqueName: \"kubernetes.io/projected/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-kube-api-access-s595d\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.703536 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:26 crc kubenswrapper[5113]: I0130 00:21:26.917091 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"]
Jan 30 00:21:27 crc kubenswrapper[5113]: I0130 00:21:27.599011 5113 generic.go:358] "Generic (PLEG): container finished" podID="f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" containerID="eaadd745307e5bcba5bc85e86d345c8e6f97c0697f2365c7306b53f3ba723e7a" exitCode=0
Jan 30 00:21:27 crc kubenswrapper[5113]: I0130 00:21:27.599092 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4" event={"ID":"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415","Type":"ContainerDied","Data":"eaadd745307e5bcba5bc85e86d345c8e6f97c0697f2365c7306b53f3ba723e7a"}
Jan 30 00:21:27 crc kubenswrapper[5113]: I0130 00:21:27.599986 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4" event={"ID":"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415","Type":"ContainerStarted","Data":"88f83295c57761edb90cccb7679962fd3906d9af8153b8294f846baf56e588a7"}
Jan 30 00:21:29 crc kubenswrapper[5113]: I0130 00:21:29.618381 5113 generic.go:358] "Generic (PLEG): container finished" podID="f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" containerID="b2ea2c9b2496a039eea3048abd3017820f10b9e2a1f498e37c2f950481733750" exitCode=0
Jan 30 00:21:29 crc kubenswrapper[5113]: I0130 00:21:29.618445 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4" event={"ID":"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415","Type":"ContainerDied","Data":"b2ea2c9b2496a039eea3048abd3017820f10b9e2a1f498e37c2f950481733750"}
Jan 30 00:21:30 crc kubenswrapper[5113]: I0130 00:21:30.630437 5113 generic.go:358] "Generic (PLEG): container finished" podID="f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" containerID="f339a7f82fb122667a1ba98480a7260a216936f126209126ea70a894357ffffa" exitCode=0
Jan 30 00:21:30 crc kubenswrapper[5113]: I0130 00:21:30.630758 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4" event={"ID":"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415","Type":"ContainerDied","Data":"f339a7f82fb122667a1ba98480a7260a216936f126209126ea70a894357ffffa"}
Jan 30 00:21:31 crc kubenswrapper[5113]: I0130 00:21:31.969497 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.127376 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s595d\" (UniqueName: \"kubernetes.io/projected/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-kube-api-access-s595d\") pod \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") "
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.127661 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-util\") pod \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") "
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.127801 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-bundle\") pod \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\" (UID: \"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415\") "
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.131458 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-bundle" (OuterVolumeSpecName: "bundle") pod "f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" (UID: "f2c87f6f-31be-47ab-bcf7-4d6f1fd34415"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.139191 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-kube-api-access-s595d" (OuterVolumeSpecName: "kube-api-access-s595d") pod "f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" (UID: "f2c87f6f-31be-47ab-bcf7-4d6f1fd34415"). InnerVolumeSpecName "kube-api-access-s595d". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.154561 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-util" (OuterVolumeSpecName: "util") pod "f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" (UID: "f2c87f6f-31be-47ab-bcf7-4d6f1fd34415"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.230096 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-s595d\" (UniqueName: \"kubernetes.io/projected/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-kube-api-access-s595d\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.230153 5113 reconciler_common.go:299] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-util\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.230173 5113 reconciler_common.go:299] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f2c87f6f-31be-47ab-bcf7-4d6f1fd34415-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.648209 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4" event={"ID":"f2c87f6f-31be-47ab-bcf7-4d6f1fd34415","Type":"ContainerDied","Data":"88f83295c57761edb90cccb7679962fd3906d9af8153b8294f846baf56e588a7"}
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.648730 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="88f83295c57761edb90cccb7679962fd3906d9af8153b8294f846baf56e588a7"
Jan 30 00:21:32 crc kubenswrapper[5113]: I0130 00:21:32.648347 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.574554 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"]
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.575591 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" containerName="util"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.575618 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" containerName="util"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.575661 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" containerName="pull"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.575673 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" containerName="pull"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.575690 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" containerName="extract"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.575704 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" containerName="extract"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.575886 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="f2c87f6f-31be-47ab-bcf7-4d6f1fd34415" containerName="extract"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.587386 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"]
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.587632 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.592082 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-marketplace\"/\"default-dockercfg-b2ccr\""
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.660555 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvxpf\" (UniqueName: \"kubernetes.io/projected/b060f104-6f51-4e29-987f-78aec9eb9a4f-kube-api-access-cvxpf\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.660672 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-bundle\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.660790 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-util\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.762585 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-bundle\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.762651 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-util\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.762802 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-cvxpf\" (UniqueName: \"kubernetes.io/projected/b060f104-6f51-4e29-987f-78aec9eb9a4f-kube-api-access-cvxpf\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.763873 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-bundle\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.764615 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-util\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.797770 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvxpf\" (UniqueName: \"kubernetes.io/projected/b060f104-6f51-4e29-987f-78aec9eb9a4f-kube-api-access-cvxpf\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:33 crc kubenswrapper[5113]: I0130 00:21:33.930840 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.204808 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"]
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.356803 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"]
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.362719 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.370762 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"]
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.475977 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/37e551ce-ff20-486b-986a-429cb060e341-bundle\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46\" (UID: \"37e551ce-ff20-486b-986a-429cb060e341\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.476275 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxmz5\" (UniqueName: \"kubernetes.io/projected/37e551ce-ff20-486b-986a-429cb060e341-kube-api-access-pxmz5\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46\" (UID: \"37e551ce-ff20-486b-986a-429cb060e341\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.476411 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/37e551ce-ff20-486b-986a-429cb060e341-util\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46\" (UID: \"37e551ce-ff20-486b-986a-429cb060e341\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.578261 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/37e551ce-ff20-486b-986a-429cb060e341-util\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46\" (UID: \"37e551ce-ff20-486b-986a-429cb060e341\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.578797 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/37e551ce-ff20-486b-986a-429cb060e341-bundle\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46\" (UID: \"37e551ce-ff20-486b-986a-429cb060e341\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.578879 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-pxmz5\" (UniqueName: \"kubernetes.io/projected/37e551ce-ff20-486b-986a-429cb060e341-kube-api-access-pxmz5\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46\" (UID: \"37e551ce-ff20-486b-986a-429cb060e341\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.579022 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/37e551ce-ff20-486b-986a-429cb060e341-util\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46\" (UID: \"37e551ce-ff20-486b-986a-429cb060e341\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.579459 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/37e551ce-ff20-486b-986a-429cb060e341-bundle\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46\" (UID: \"37e551ce-ff20-486b-986a-429cb060e341\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.603914 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxmz5\" (UniqueName: \"kubernetes.io/projected/37e551ce-ff20-486b-986a-429cb060e341-kube-api-access-pxmz5\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46\" (UID: \"37e551ce-ff20-486b-986a-429cb060e341\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.664229 5113 generic.go:358] "Generic (PLEG): container finished" podID="b060f104-6f51-4e29-987f-78aec9eb9a4f" containerID="e84b8dbe9a5df25f0e993cdba1a0b0ddbfa6bcab8ff407f4cab0ad8578890215" exitCode=0
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.664426 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7" event={"ID":"b060f104-6f51-4e29-987f-78aec9eb9a4f","Type":"ContainerDied","Data":"e84b8dbe9a5df25f0e993cdba1a0b0ddbfa6bcab8ff407f4cab0ad8578890215"}
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.664469 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7" event={"ID":"b060f104-6f51-4e29-987f-78aec9eb9a4f","Type":"ContainerStarted","Data":"d916c06fbd59579b876e231e4100bedecdc1e41291803864ebba0600d3d17b0f"}
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.693835 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"
Jan 30 00:21:34 crc kubenswrapper[5113]: I0130 00:21:34.952737 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46"]
Jan 30 00:21:35 crc kubenswrapper[5113]: I0130 00:21:35.674696 5113 generic.go:358] "Generic (PLEG): container finished" podID="37e551ce-ff20-486b-986a-429cb060e341" containerID="ec695e513699f20af634ead34c909f63fec109fa1eacaff9407c8791d0ee40d8" exitCode=0
Jan 30 00:21:35 crc kubenswrapper[5113]: I0130 00:21:35.674830 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" event={"ID":"37e551ce-ff20-486b-986a-429cb060e341","Type":"ContainerDied","Data":"ec695e513699f20af634ead34c909f63fec109fa1eacaff9407c8791d0ee40d8"}
Jan 30 00:21:35 crc kubenswrapper[5113]: I0130 00:21:35.675318 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" event={"ID":"37e551ce-ff20-486b-986a-429cb060e341","Type":"ContainerStarted","Data":"c747c6069ed349c3f231b828dd9ea5faefcb570e2c613147c5600d11b50b5236"}
Jan 30 00:21:35 crc kubenswrapper[5113]: E0130 00:21:35.919878 5113 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb"
Jan 30 00:21:35 crc kubenswrapper[5113]: E0130 00:21:35.920132 5113 kuberuntime_manager.go:1358] "Unhandled Error" err="init container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pxmz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46_openshift-marketplace(37e551ce-ff20-486b-986a-429cb060e341): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError"
Jan 30 00:21:35 crc kubenswrapper[5113]: E0130 00:21:35.921380 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:21:36 crc kubenswrapper[5113]: I0130 00:21:36.685861 5113 generic.go:358] "Generic (PLEG): container finished" podID="b060f104-6f51-4e29-987f-78aec9eb9a4f" containerID="c52ce130d28a98ac05ef3cfbd4a15c0aafae12f1d40b22d1d115ff1a4a2e0e3f" exitCode=0
Jan 30 00:21:36 crc kubenswrapper[5113]: I0130 00:21:36.685955 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7" event={"ID":"b060f104-6f51-4e29-987f-78aec9eb9a4f","Type":"ContainerDied","Data":"c52ce130d28a98ac05ef3cfbd4a15c0aafae12f1d40b22d1d115ff1a4a2e0e3f"}
Jan 30 00:21:36 crc kubenswrapper[5113]: E0130 00:21:36.689060 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:21:37 crc kubenswrapper[5113]: I0130 00:21:37.695731 5113 generic.go:358] "Generic (PLEG): container finished" podID="b060f104-6f51-4e29-987f-78aec9eb9a4f" containerID="2cc3310192e7d18bec997fd93e4da37dca2f2bcb434dee5af4995da997af781d" exitCode=0
Jan 30 00:21:37 crc kubenswrapper[5113]: I0130 00:21:37.695806 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7" event={"ID":"b060f104-6f51-4e29-987f-78aec9eb9a4f","Type":"ContainerDied","Data":"2cc3310192e7d18bec997fd93e4da37dca2f2bcb434dee5af4995da997af781d"}
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.080743 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.156292 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvxpf\" (UniqueName: \"kubernetes.io/projected/b060f104-6f51-4e29-987f-78aec9eb9a4f-kube-api-access-cvxpf\") pod \"b060f104-6f51-4e29-987f-78aec9eb9a4f\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") "
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.156438 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-bundle\") pod \"b060f104-6f51-4e29-987f-78aec9eb9a4f\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") "
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.156507 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-util\") pod \"b060f104-6f51-4e29-987f-78aec9eb9a4f\" (UID: \"b060f104-6f51-4e29-987f-78aec9eb9a4f\") "
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.158429 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-bundle" (OuterVolumeSpecName: "bundle") pod "b060f104-6f51-4e29-987f-78aec9eb9a4f" (UID: "b060f104-6f51-4e29-987f-78aec9eb9a4f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.175747 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b060f104-6f51-4e29-987f-78aec9eb9a4f-kube-api-access-cvxpf" (OuterVolumeSpecName: "kube-api-access-cvxpf") pod "b060f104-6f51-4e29-987f-78aec9eb9a4f" (UID: "b060f104-6f51-4e29-987f-78aec9eb9a4f"). InnerVolumeSpecName "kube-api-access-cvxpf". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.181767 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-util" (OuterVolumeSpecName: "util") pod "b060f104-6f51-4e29-987f-78aec9eb9a4f" (UID: "b060f104-6f51-4e29-987f-78aec9eb9a4f"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.258064 5113 reconciler_common.go:299] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.258111 5113 reconciler_common.go:299] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b060f104-6f51-4e29-987f-78aec9eb9a4f-util\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.258123 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-cvxpf\" (UniqueName: \"kubernetes.io/projected/b060f104-6f51-4e29-987f-78aec9eb9a4f-kube-api-access-cvxpf\") on node \"crc\" DevicePath \"\""
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.721799 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7" event={"ID":"b060f104-6f51-4e29-987f-78aec9eb9a4f","Type":"ContainerDied","Data":"d916c06fbd59579b876e231e4100bedecdc1e41291803864ebba0600d3d17b0f"}
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.722829 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d916c06fbd59579b876e231e4100bedecdc1e41291803864ebba0600d3d17b0f"
Jan 30 00:21:39 crc kubenswrapper[5113]: I0130 00:21:39.723047 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7"
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.874682 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"]
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.875342 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b060f104-6f51-4e29-987f-78aec9eb9a4f" containerName="util"
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.875358 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="b060f104-6f51-4e29-987f-78aec9eb9a4f" containerName="util"
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.875370 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b060f104-6f51-4e29-987f-78aec9eb9a4f" containerName="pull"
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.875376 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="b060f104-6f51-4e29-987f-78aec9eb9a4f" containerName="pull"
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.875411 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b060f104-6f51-4e29-987f-78aec9eb9a4f" containerName="extract"
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.875417 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="b060f104-6f51-4e29-987f-78aec9eb9a4f" containerName="extract"
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.875507 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="b060f104-6f51-4e29-987f-78aec9eb9a4f" containerName="extract"
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.883773 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.903235 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"]
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.996331 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.996510 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"
Jan 30 00:21:41 crc kubenswrapper[5113]: I0130 00:21:41.996625 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8lvn\" (UniqueName: \"kubernetes.io/projected/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-kube-api-access-z8lvn\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"
Jan 30 00:21:42 crc kubenswrapper[5113]: I0130 00:21:42.097998 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"
Jan 30 00:21:42 crc kubenswrapper[5113]: I0130 00:21:42.098198 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"
Jan 30 00:21:42 crc kubenswrapper[5113]: I0130 00:21:42.098240 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-z8lvn\" (UniqueName: \"kubernetes.io/projected/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-kube-api-access-z8lvn\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"
Jan 30 00:21:42 crc kubenswrapper[5113]: I0130 00:21:42.098766 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-util\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"
Jan 30 00:21:42 crc kubenswrapper[5113]: I0130 00:21:42.099042 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-bundle\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"
Jan 30 00:21:42 crc kubenswrapper[5113]: I0130 00:21:42.147557 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8lvn\" (UniqueName: \"kubernetes.io/projected/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-kube-api-access-z8lvn\") pod \"925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"
Jan 30 00:21:42 crc kubenswrapper[5113]: I0130 00:21:42.202198 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"
Jan 30 00:21:42 crc kubenswrapper[5113]: I0130 00:21:42.675128 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm"]
Jan 30 00:21:42 crc kubenswrapper[5113]: I0130 00:21:42.765940 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm" event={"ID":"dc777063-84e1-4b8d-93b5-8ac8bbfee31d","Type":"ContainerStarted","Data":"fc39653aaf0dc6a509e71729406d0cf83af0d196c1d5d20ec64c3f4af4010018"}
Jan 30 00:21:43 crc kubenswrapper[5113]: I0130 00:21:43.774364 5113 generic.go:358] "Generic (PLEG): container finished" podID="dc777063-84e1-4b8d-93b5-8ac8bbfee31d" containerID="4e28f8c1517d7fecd71a1f2be86f667e230d10b58181e432ba110ed2f8169843" exitCode=0
Jan 30 00:21:43 crc kubenswrapper[5113]: I0130 00:21:43.776406 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm" event={"ID":"dc777063-84e1-4b8d-93b5-8ac8bbfee31d","Type":"ContainerDied","Data":"4e28f8c1517d7fecd71a1f2be86f667e230d10b58181e432ba110ed2f8169843"}
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.787995 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-9bc85b4bf-twh29"]
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.798473 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-twh29"
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.802690 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operators\"/\"openshift-service-ca.crt\""
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.803630 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operators\"/\"kube-root-ca.crt\""
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.804205 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"obo-prometheus-operator-dockercfg-rnf4c\""
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.823140 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-9bc85b4bf-twh29"]
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.836517 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk"]
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.847891 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk"
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.851406 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"obo-prometheus-operator-admission-webhook-service-cert\""
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.851543 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"obo-prometheus-operator-admission-webhook-dockercfg-m4p5c\""
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.860335 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4"]
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.868390 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lkjg\" (UniqueName: \"kubernetes.io/projected/4444bab4-230c-48fb-b893-8d3e93807137-kube-api-access-9lkjg\") pod \"obo-prometheus-operator-9bc85b4bf-twh29\" (UID: \"4444bab4-230c-48fb-b893-8d3e93807137\") " pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-twh29"
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.872882 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk"]
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.873257 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4"
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.891685 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4"]
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.970179 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-9lkjg\" (UniqueName: \"kubernetes.io/projected/4444bab4-230c-48fb-b893-8d3e93807137-kube-api-access-9lkjg\") pod \"obo-prometheus-operator-9bc85b4bf-twh29\" (UID: \"4444bab4-230c-48fb-b893-8d3e93807137\") " pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-twh29"
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.970257 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0059f278-9b5a-4441-bd68-67ee6c139623-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk\" (UID: \"0059f278-9b5a-4441-bd68-67ee6c139623\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk"
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.970296 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9bc38c6e-f8a9-4383-b930-43e56fe731f7-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4\" (UID: \"9bc38c6e-f8a9-4383-b930-43e56fe731f7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4"
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.970317 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9bc38c6e-f8a9-4383-b930-43e56fe731f7-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4\" (UID: \"9bc38c6e-f8a9-4383-b930-43e56fe731f7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4"
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.970506 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0059f278-9b5a-4441-bd68-67ee6c139623-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk\" (UID: \"0059f278-9b5a-4441-bd68-67ee6c139623\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk"
Jan 30 00:21:45 crc kubenswrapper[5113]: I0130 00:21:45.997004 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lkjg\" (UniqueName: \"kubernetes.io/projected/4444bab4-230c-48fb-b893-8d3e93807137-kube-api-access-9lkjg\") pod \"obo-prometheus-operator-9bc85b4bf-twh29\" (UID: \"4444bab4-230c-48fb-b893-8d3e93807137\") " pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-twh29"
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.037353 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-85c68dddb-l2hz8"]
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.056916 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-85c68dddb-l2hz8"
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.059640 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"observability-operator-tls\""
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.060214 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"observability-operator-sa-dockercfg-fzjxp\""
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.072949 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0059f278-9b5a-4441-bd68-67ee6c139623-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk\" (UID: \"0059f278-9b5a-4441-bd68-67ee6c139623\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk"
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.073064 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0059f278-9b5a-4441-bd68-67ee6c139623-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk\" (UID: \"0059f278-9b5a-4441-bd68-67ee6c139623\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk"
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.073102 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9bc38c6e-f8a9-4383-b930-43e56fe731f7-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4\" (UID: \"9bc38c6e-f8a9-4383-b930-43e56fe731f7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4"
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.073126 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/9bc38c6e-f8a9-4383-b930-43e56fe731f7-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4\" (UID: \"9bc38c6e-f8a9-4383-b930-43e56fe731f7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4"
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.081199 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0059f278-9b5a-4441-bd68-67ee6c139623-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk\" (UID: \"0059f278-9b5a-4441-bd68-67ee6c139623\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk"
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.083950 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-85c68dddb-l2hz8"]
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.094211 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0059f278-9b5a-4441-bd68-67ee6c139623-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk\" (UID: \"0059f278-9b5a-4441-bd68-67ee6c139623\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk"
Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.094280 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"apiservice-cert\"
(UniqueName: \"kubernetes.io/secret/9bc38c6e-f8a9-4383-b930-43e56fe731f7-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4\" (UID: \"9bc38c6e-f8a9-4383-b930-43e56fe731f7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.101224 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/9bc38c6e-f8a9-4383-b930-43e56fe731f7-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4\" (UID: \"9bc38c6e-f8a9-4383-b930-43e56fe731f7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.120331 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-twh29" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.175122 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/e4c6bf15-ffcf-4513-a573-c2a328472abe-observability-operator-tls\") pod \"observability-operator-85c68dddb-l2hz8\" (UID: \"e4c6bf15-ffcf-4513-a573-c2a328472abe\") " pod="openshift-operators/observability-operator-85c68dddb-l2hz8" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.175216 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4qmd\" (UniqueName: \"kubernetes.io/projected/e4c6bf15-ffcf-4513-a573-c2a328472abe-kube-api-access-f4qmd\") pod \"observability-operator-85c68dddb-l2hz8\" (UID: \"e4c6bf15-ffcf-4513-a573-c2a328472abe\") " pod="openshift-operators/observability-operator-85c68dddb-l2hz8" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.182145 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.197716 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.221152 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-669c9f96b5-4c4gl"] Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.259494 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.264124 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operators\"/\"perses-operator-dockercfg-f9ptn\"" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.276451 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-f4qmd\" (UniqueName: \"kubernetes.io/projected/e4c6bf15-ffcf-4513-a573-c2a328472abe-kube-api-access-f4qmd\") pod \"observability-operator-85c68dddb-l2hz8\" (UID: \"e4c6bf15-ffcf-4513-a573-c2a328472abe\") " pod="openshift-operators/observability-operator-85c68dddb-l2hz8" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.281589 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wfrk\" (UniqueName: \"kubernetes.io/projected/8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5-kube-api-access-6wfrk\") pod \"perses-operator-669c9f96b5-4c4gl\" (UID: \"8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5\") " pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.281738 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/e4c6bf15-ffcf-4513-a573-c2a328472abe-observability-operator-tls\") pod \"observability-operator-85c68dddb-l2hz8\" (UID: \"e4c6bf15-ffcf-4513-a573-c2a328472abe\") " pod="openshift-operators/observability-operator-85c68dddb-l2hz8" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.281858 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5-openshift-service-ca\") pod \"perses-operator-669c9f96b5-4c4gl\" (UID: \"8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5\") " pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.283808 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-669c9f96b5-4c4gl"] Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.296981 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/e4c6bf15-ffcf-4513-a573-c2a328472abe-observability-operator-tls\") pod \"observability-operator-85c68dddb-l2hz8\" (UID: \"e4c6bf15-ffcf-4513-a573-c2a328472abe\") " pod="openshift-operators/observability-operator-85c68dddb-l2hz8" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.322551 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4qmd\" (UniqueName: \"kubernetes.io/projected/e4c6bf15-ffcf-4513-a573-c2a328472abe-kube-api-access-f4qmd\") pod \"observability-operator-85c68dddb-l2hz8\" (UID: \"e4c6bf15-ffcf-4513-a573-c2a328472abe\") " pod="openshift-operators/observability-operator-85c68dddb-l2hz8" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.383721 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-6wfrk\" (UniqueName: \"kubernetes.io/projected/8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5-kube-api-access-6wfrk\") pod \"perses-operator-669c9f96b5-4c4gl\" (UID: \"8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5\") " pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 
00:21:46.383818 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5-openshift-service-ca\") pod \"perses-operator-669c9f96b5-4c4gl\" (UID: \"8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5\") " pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.384820 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5-openshift-service-ca\") pod \"perses-operator-669c9f96b5-4c4gl\" (UID: \"8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5\") " pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.414364 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wfrk\" (UniqueName: \"kubernetes.io/projected/8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5-kube-api-access-6wfrk\") pod \"perses-operator-669c9f96b5-4c4gl\" (UID: \"8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5\") " pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.438329 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-85c68dddb-l2hz8" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.518686 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-9bc85b4bf-twh29"] Jan 30 00:21:46 crc kubenswrapper[5113]: W0130 00:21:46.542451 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4444bab4_230c_48fb_b893_8d3e93807137.slice/crio-f0cb2bfaa7df3ae9bf77ebe5633c032d38e285e0459498f73846ed4cbcb3c4e6 WatchSource:0}: Error finding container f0cb2bfaa7df3ae9bf77ebe5633c032d38e285e0459498f73846ed4cbcb3c4e6: Status 404 returned error can't find the container with id f0cb2bfaa7df3ae9bf77ebe5633c032d38e285e0459498f73846ed4cbcb3c4e6 Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.606187 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.676038 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk"] Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.737373 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4"] Jan 30 00:21:46 crc kubenswrapper[5113]: W0130 00:21:46.766281 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9bc38c6e_f8a9_4383_b930_43e56fe731f7.slice/crio-e5fc8197899e7af679c6829c51ff50334048a3739d32ffc7a70e53b4649e060a WatchSource:0}: Error finding container e5fc8197899e7af679c6829c51ff50334048a3739d32ffc7a70e53b4649e060a: Status 404 returned error can't find the container with id e5fc8197899e7af679c6829c51ff50334048a3739d32ffc7a70e53b4649e060a Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.822202 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk" event={"ID":"0059f278-9b5a-4441-bd68-67ee6c139623","Type":"ContainerStarted","Data":"c053cb2c8cca39fc110b872c9111819d5e2dbfade4e764334eafc2cf703c0cad"} Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.834229 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4" event={"ID":"9bc38c6e-f8a9-4383-b930-43e56fe731f7","Type":"ContainerStarted","Data":"e5fc8197899e7af679c6829c51ff50334048a3739d32ffc7a70e53b4649e060a"} Jan 30 00:21:46 crc kubenswrapper[5113]: I0130 00:21:46.836004 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-twh29" event={"ID":"4444bab4-230c-48fb-b893-8d3e93807137","Type":"ContainerStarted","Data":"f0cb2bfaa7df3ae9bf77ebe5633c032d38e285e0459498f73846ed4cbcb3c4e6"} Jan 30 00:21:47 crc kubenswrapper[5113]: I0130 00:21:47.071487 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-669c9f96b5-4c4gl"] Jan 30 00:21:47 crc kubenswrapper[5113]: I0130 00:21:47.137056 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-85c68dddb-l2hz8"] Jan 30 00:21:47 crc kubenswrapper[5113]: I0130 00:21:47.859706 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" event={"ID":"8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5","Type":"ContainerStarted","Data":"72f7534ddd1cf27c09ff717259ce07477a1972bfadee7a0a641f26aee2d16155"} Jan 30 00:21:47 crc kubenswrapper[5113]: I0130 00:21:47.873876 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-85c68dddb-l2hz8" event={"ID":"e4c6bf15-ffcf-4513-a573-c2a328472abe","Type":"ContainerStarted","Data":"3e4f6eb3b4f275be089c3429ef29a18f42219d19efbcb339efd3e54ff2464050"} Jan 30 00:21:51 crc kubenswrapper[5113]: E0130 00:21:51.105278 5113 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get 
\"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb" Jan 30 00:21:51 crc kubenswrapper[5113]: E0130 00:21:51.106422 5113 kuberuntime_manager.go:1358] "Unhandled Error" err="init container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pxmz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46_openshift-marketplace(37e551ce-ff20-486b-986a-429cb060e341): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError" Jan 30 00:21:51 crc kubenswrapper[5113]: E0130 00:21:51.108106 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image 
source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:21:51 crc kubenswrapper[5113]: I0130 00:21:51.983135 5113 generic.go:358] "Generic (PLEG): container finished" podID="dc777063-84e1-4b8d-93b5-8ac8bbfee31d" containerID="0fb98c82ff1f1f14c8eebb92203d8bd289d449e26ee0fcff9b70259a942769c3" exitCode=0 Jan 30 00:21:51 crc kubenswrapper[5113]: I0130 00:21:51.983229 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm" event={"ID":"dc777063-84e1-4b8d-93b5-8ac8bbfee31d","Type":"ContainerDied","Data":"0fb98c82ff1f1f14c8eebb92203d8bd289d449e26ee0fcff9b70259a942769c3"} Jan 30 00:21:52 crc kubenswrapper[5113]: I0130 00:21:52.997361 5113 generic.go:358] "Generic (PLEG): container finished" podID="dc777063-84e1-4b8d-93b5-8ac8bbfee31d" containerID="b9fe1dee411da6b77edf4805f6dc2a1235389f181276a35b3178db38e4538bcd" exitCode=0 Jan 30 00:21:52 crc kubenswrapper[5113]: I0130 00:21:52.997593 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm" event={"ID":"dc777063-84e1-4b8d-93b5-8ac8bbfee31d","Type":"ContainerDied","Data":"b9fe1dee411da6b77edf4805f6dc2a1235389f181276a35b3178db38e4538bcd"} Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.356287 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mrmfc"] Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.364101 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.366872 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mrmfc"] Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.455304 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-catalog-content\") pod \"redhat-operators-mrmfc\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.455392 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q97p8\" (UniqueName: \"kubernetes.io/projected/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-kube-api-access-q97p8\") pod \"redhat-operators-mrmfc\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.456384 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-utilities\") pod \"redhat-operators-mrmfc\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.558008 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-utilities\") pod \"redhat-operators-mrmfc\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.558118 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-catalog-content\") pod \"redhat-operators-mrmfc\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.558176 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-q97p8\" (UniqueName: \"kubernetes.io/projected/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-kube-api-access-q97p8\") pod \"redhat-operators-mrmfc\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.558807 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-utilities\") pod \"redhat-operators-mrmfc\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.559185 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-catalog-content\") pod \"redhat-operators-mrmfc\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.589321 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-q97p8\" (UniqueName: \"kubernetes.io/projected/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-kube-api-access-q97p8\") pod \"redhat-operators-mrmfc\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:21:54 crc kubenswrapper[5113]: I0130 00:21:54.702892 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:21:57 crc kubenswrapper[5113]: I0130 00:21:57.130499 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm" Jan 30 00:21:57 crc kubenswrapper[5113]: I0130 00:21:57.207096 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-util\") pod \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " Jan 30 00:21:57 crc kubenswrapper[5113]: I0130 00:21:57.207157 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-bundle\") pod \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " Jan 30 00:21:57 crc kubenswrapper[5113]: I0130 00:21:57.207331 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8lvn\" (UniqueName: \"kubernetes.io/projected/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-kube-api-access-z8lvn\") pod \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\" (UID: \"dc777063-84e1-4b8d-93b5-8ac8bbfee31d\") " Jan 30 00:21:57 crc kubenswrapper[5113]: I0130 00:21:57.211220 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-bundle" (OuterVolumeSpecName: "bundle") pod "dc777063-84e1-4b8d-93b5-8ac8bbfee31d" (UID: "dc777063-84e1-4b8d-93b5-8ac8bbfee31d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:21:57 crc kubenswrapper[5113]: I0130 00:21:57.218234 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-util" (OuterVolumeSpecName: "util") pod "dc777063-84e1-4b8d-93b5-8ac8bbfee31d" (UID: "dc777063-84e1-4b8d-93b5-8ac8bbfee31d"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:21:57 crc kubenswrapper[5113]: I0130 00:21:57.224225 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-kube-api-access-z8lvn" (OuterVolumeSpecName: "kube-api-access-z8lvn") pod "dc777063-84e1-4b8d-93b5-8ac8bbfee31d" (UID: "dc777063-84e1-4b8d-93b5-8ac8bbfee31d"). InnerVolumeSpecName "kube-api-access-z8lvn". 
PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:21:57 crc kubenswrapper[5113]: I0130 00:21:57.309072 5113 reconciler_common.go:299] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-util\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:57 crc kubenswrapper[5113]: I0130 00:21:57.309111 5113 reconciler_common.go:299] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:57 crc kubenswrapper[5113]: I0130 00:21:57.309124 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-z8lvn\" (UniqueName: \"kubernetes.io/projected/dc777063-84e1-4b8d-93b5-8ac8bbfee31d-kube-api-access-z8lvn\") on node \"crc\" DevicePath \"\"" Jan 30 00:21:58 crc kubenswrapper[5113]: I0130 00:21:58.052185 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm" Jan 30 00:21:58 crc kubenswrapper[5113]: I0130 00:21:58.052173 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm" event={"ID":"dc777063-84e1-4b8d-93b5-8ac8bbfee31d","Type":"ContainerDied","Data":"fc39653aaf0dc6a509e71729406d0cf83af0d196c1d5d20ec64c3f4af4010018"} Jan 30 00:21:58 crc kubenswrapper[5113]: I0130 00:21:58.052339 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc39653aaf0dc6a509e71729406d0cf83af0d196c1d5d20ec64c3f4af4010018" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.142555 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495542-n5nbw"] Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.143916 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="dc777063-84e1-4b8d-93b5-8ac8bbfee31d" containerName="util" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.143934 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc777063-84e1-4b8d-93b5-8ac8bbfee31d" containerName="util" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.143967 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="dc777063-84e1-4b8d-93b5-8ac8bbfee31d" containerName="pull" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.143973 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc777063-84e1-4b8d-93b5-8ac8bbfee31d" containerName="pull" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.143988 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="dc777063-84e1-4b8d-93b5-8ac8bbfee31d" containerName="extract" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.143997 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc777063-84e1-4b8d-93b5-8ac8bbfee31d" containerName="extract" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.144099 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="dc777063-84e1-4b8d-93b5-8ac8bbfee31d" containerName="extract" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.152982 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495542-n5nbw" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.156584 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-kshml\"" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.157338 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\"" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.157534 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\"" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.186785 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495542-n5nbw"] Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.265810 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmg6s\" (UniqueName: \"kubernetes.io/projected/42c94cc3-978a-4e0c-824f-07666143914d-kube-api-access-zmg6s\") pod \"auto-csr-approver-29495542-n5nbw\" (UID: \"42c94cc3-978a-4e0c-824f-07666143914d\") " pod="openshift-infra/auto-csr-approver-29495542-n5nbw" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.367642 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-zmg6s\" (UniqueName: \"kubernetes.io/projected/42c94cc3-978a-4e0c-824f-07666143914d-kube-api-access-zmg6s\") pod \"auto-csr-approver-29495542-n5nbw\" (UID: \"42c94cc3-978a-4e0c-824f-07666143914d\") " pod="openshift-infra/auto-csr-approver-29495542-n5nbw" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.402809 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmg6s\" (UniqueName: \"kubernetes.io/projected/42c94cc3-978a-4e0c-824f-07666143914d-kube-api-access-zmg6s\") pod \"auto-csr-approver-29495542-n5nbw\" (UID: \"42c94cc3-978a-4e0c-824f-07666143914d\") " pod="openshift-infra/auto-csr-approver-29495542-n5nbw" Jan 30 00:22:00 crc kubenswrapper[5113]: I0130 00:22:00.489385 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495542-n5nbw" Jan 30 00:22:02 crc kubenswrapper[5113]: I0130 00:22:02.940260 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644"] Jan 30 00:22:02 crc kubenswrapper[5113]: I0130 00:22:02.947665 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644" Jan 30 00:22:02 crc kubenswrapper[5113]: I0130 00:22:02.951001 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"cert-manager-operator\"/\"openshift-service-ca.crt\"" Jan 30 00:22:02 crc kubenswrapper[5113]: I0130 00:22:02.951045 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"cert-manager-operator\"/\"cert-manager-operator-controller-manager-dockercfg-8fct7\"" Jan 30 00:22:02 crc kubenswrapper[5113]: I0130 00:22:02.951401 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"cert-manager-operator\"/\"kube-root-ca.crt\"" Jan 30 00:22:02 crc kubenswrapper[5113]: I0130 00:22:02.979905 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644"] Jan 30 00:22:03 crc kubenswrapper[5113]: I0130 00:22:03.116126 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wr9lh\" (UniqueName: \"kubernetes.io/projected/fed9562a-0a07-46b0-8f10-9d3614e20db3-kube-api-access-wr9lh\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-94644\" (UID: \"fed9562a-0a07-46b0-8f10-9d3614e20db3\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644" Jan 30 00:22:03 crc kubenswrapper[5113]: I0130 00:22:03.116218 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fed9562a-0a07-46b0-8f10-9d3614e20db3-tmp\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-94644\" (UID: \"fed9562a-0a07-46b0-8f10-9d3614e20db3\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644" Jan 30 00:22:03 crc kubenswrapper[5113]: I0130 00:22:03.217753 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fed9562a-0a07-46b0-8f10-9d3614e20db3-tmp\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-94644\" (UID: \"fed9562a-0a07-46b0-8f10-9d3614e20db3\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644" Jan 30 00:22:03 crc kubenswrapper[5113]: I0130 00:22:03.217846 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-wr9lh\" (UniqueName: \"kubernetes.io/projected/fed9562a-0a07-46b0-8f10-9d3614e20db3-kube-api-access-wr9lh\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-94644\" (UID: \"fed9562a-0a07-46b0-8f10-9d3614e20db3\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644" Jan 30 00:22:03 crc kubenswrapper[5113]: I0130 00:22:03.218587 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/fed9562a-0a07-46b0-8f10-9d3614e20db3-tmp\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-94644\" (UID: \"fed9562a-0a07-46b0-8f10-9d3614e20db3\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644" Jan 30 00:22:03 crc kubenswrapper[5113]: I0130 00:22:03.269144 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-wr9lh\" (UniqueName: \"kubernetes.io/projected/fed9562a-0a07-46b0-8f10-9d3614e20db3-kube-api-access-wr9lh\") pod \"cert-manager-operator-controller-manager-7c5b8bd68-94644\" (UID: 
\"fed9562a-0a07-46b0-8f10-9d3614e20db3\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644" Jan 30 00:22:03 crc kubenswrapper[5113]: I0130 00:22:03.564692 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644" Jan 30 00:22:04 crc kubenswrapper[5113]: E0130 00:22:04.779100 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:22:04 crc kubenswrapper[5113]: I0130 00:22:04.867189 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495542-n5nbw"] Jan 30 00:22:04 crc kubenswrapper[5113]: I0130 00:22:04.993105 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644"] Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.117972 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" event={"ID":"8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5","Type":"ContainerStarted","Data":"e28171c6c3dd7b6f75925e6cd279b9e25c406dd8ac87c32aec38fd05e2cef5d3"} Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.118744 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.121221 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-85c68dddb-l2hz8" event={"ID":"e4c6bf15-ffcf-4513-a573-c2a328472abe","Type":"ContainerStarted","Data":"9e4da60cc26a14da895ebd63463a808170e18b5328c50abc262ea2a2ba30d2be"} Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.123461 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-operators/observability-operator-85c68dddb-l2hz8" Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.135069 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-twh29" event={"ID":"4444bab4-230c-48fb-b893-8d3e93807137","Type":"ContainerStarted","Data":"ffd9605a0f6fafc76fc6a54c008a53519c268dffad086e247eeea0d5b0cbf179"} Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.138655 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk" 
event={"ID":"0059f278-9b5a-4441-bd68-67ee6c139623","Type":"ContainerStarted","Data":"ec9269d94b35d71d677140c9614756600982156ad100de6abdf39f652ddda06b"} Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.140318 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495542-n5nbw" event={"ID":"42c94cc3-978a-4e0c-824f-07666143914d","Type":"ContainerStarted","Data":"a2fac522afdc1a43ba6bef7bdc4a8855aff21d837ab1bbc4afe98971aad7862b"} Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.147925 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4" event={"ID":"9bc38c6e-f8a9-4383-b930-43e56fe731f7","Type":"ContainerStarted","Data":"e09a35cb8ef6fd4d83ed14688db395612324d7fe33e7c40b4e186cb37bca50f3"} Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.152031 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644" event={"ID":"fed9562a-0a07-46b0-8f10-9d3614e20db3","Type":"ContainerStarted","Data":"158dcfabb0a835639f04f471f137ca72353f19fd4124ae5ee91d469586e3c5f2"} Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.164995 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" podStartSLOduration=2.023357876 podStartE2EDuration="19.164967195s" podCreationTimestamp="2026-01-30 00:21:46 +0000 UTC" firstStartedPulling="2026-01-30 00:21:47.102506349 +0000 UTC m=+727.175111736" lastFinishedPulling="2026-01-30 00:22:04.244115678 +0000 UTC m=+744.316721055" observedRunningTime="2026-01-30 00:22:05.151587997 +0000 UTC m=+745.224193384" watchObservedRunningTime="2026-01-30 00:22:05.164967195 +0000 UTC m=+745.237572572" Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.165298 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mrmfc"] Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.198123 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-85c68dddb-l2hz8" Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.288498 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk" podStartSLOduration=2.739253638 podStartE2EDuration="20.288448383s" podCreationTimestamp="2026-01-30 00:21:45 +0000 UTC" firstStartedPulling="2026-01-30 00:21:46.693977694 +0000 UTC m=+726.766583061" lastFinishedPulling="2026-01-30 00:22:04.243172429 +0000 UTC m=+744.315777806" observedRunningTime="2026-01-30 00:22:05.255421226 +0000 UTC m=+745.328026603" watchObservedRunningTime="2026-01-30 00:22:05.288448383 +0000 UTC m=+745.361053760" Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.289787 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-85c68dddb-l2hz8" podStartSLOduration=2.139552701 podStartE2EDuration="19.289782034s" podCreationTimestamp="2026-01-30 00:21:46 +0000 UTC" firstStartedPulling="2026-01-30 00:21:47.16056991 +0000 UTC m=+727.233175287" lastFinishedPulling="2026-01-30 00:22:04.310799243 +0000 UTC m=+744.383404620" observedRunningTime="2026-01-30 00:22:05.198616472 +0000 UTC m=+745.271221849" watchObservedRunningTime="2026-01-30 00:22:05.289782034 +0000 UTC m=+745.362387411" Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 
00:22:05.366894 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4" podStartSLOduration=2.91236891 podStartE2EDuration="20.366864496s" podCreationTimestamp="2026-01-30 00:21:45 +0000 UTC" firstStartedPulling="2026-01-30 00:21:46.78104185 +0000 UTC m=+726.853647227" lastFinishedPulling="2026-01-30 00:22:04.235537446 +0000 UTC m=+744.308142813" observedRunningTime="2026-01-30 00:22:05.292350362 +0000 UTC m=+745.364955729" watchObservedRunningTime="2026-01-30 00:22:05.366864496 +0000 UTC m=+745.439469873" Jan 30 00:22:05 crc kubenswrapper[5113]: I0130 00:22:05.433314 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-9bc85b4bf-twh29" podStartSLOduration=2.738589537 podStartE2EDuration="20.433288432s" podCreationTimestamp="2026-01-30 00:21:45 +0000 UTC" firstStartedPulling="2026-01-30 00:21:46.549179596 +0000 UTC m=+726.621784963" lastFinishedPulling="2026-01-30 00:22:04.243878491 +0000 UTC m=+744.316483858" observedRunningTime="2026-01-30 00:22:05.369163976 +0000 UTC m=+745.441769363" watchObservedRunningTime="2026-01-30 00:22:05.433288432 +0000 UTC m=+745.505893809" Jan 30 00:22:06 crc kubenswrapper[5113]: I0130 00:22:06.192762 5113 generic.go:358] "Generic (PLEG): container finished" podID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerID="782acbc62466e32b96651c288e979d55f66d9997e7e7285977897e7dfc90daca" exitCode=0 Jan 30 00:22:06 crc kubenswrapper[5113]: I0130 00:22:06.193852 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrmfc" event={"ID":"9b9b5244-7804-41cd-b34a-7acaaa9b89e1","Type":"ContainerDied","Data":"782acbc62466e32b96651c288e979d55f66d9997e7e7285977897e7dfc90daca"} Jan 30 00:22:06 crc kubenswrapper[5113]: I0130 00:22:06.193928 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrmfc" event={"ID":"9b9b5244-7804-41cd-b34a-7acaaa9b89e1","Type":"ContainerStarted","Data":"8765cf6293941ccb6fd920b5e7178d2c900a51a08bc84a451af7d9302506af66"} Jan 30 00:22:07 crc kubenswrapper[5113]: I0130 00:22:07.213188 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495542-n5nbw" event={"ID":"42c94cc3-978a-4e0c-824f-07666143914d","Type":"ContainerStarted","Data":"f0c2111164a2fec15cfb261ec072172eb4e7ddc17fc443dfb57b78c86a591b66"} Jan 30 00:22:07 crc kubenswrapper[5113]: I0130 00:22:07.249127 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29495542-n5nbw" podStartSLOduration=6.203882554 podStartE2EDuration="7.249103477s" podCreationTimestamp="2026-01-30 00:22:00 +0000 UTC" firstStartedPulling="2026-01-30 00:22:04.885919971 +0000 UTC m=+744.958525338" lastFinishedPulling="2026-01-30 00:22:05.931140884 +0000 UTC m=+746.003746261" observedRunningTime="2026-01-30 00:22:07.247343113 +0000 UTC m=+747.319948490" watchObservedRunningTime="2026-01-30 00:22:07.249103477 +0000 UTC m=+747.321708854" Jan 30 00:22:08 crc kubenswrapper[5113]: I0130 00:22:08.234264 5113 generic.go:358] "Generic (PLEG): container finished" podID="42c94cc3-978a-4e0c-824f-07666143914d" containerID="f0c2111164a2fec15cfb261ec072172eb4e7ddc17fc443dfb57b78c86a591b66" exitCode=0 Jan 30 00:22:08 crc kubenswrapper[5113]: I0130 00:22:08.234375 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495542-n5nbw" 
event={"ID":"42c94cc3-978a-4e0c-824f-07666143914d","Type":"ContainerDied","Data":"f0c2111164a2fec15cfb261ec072172eb4e7ddc17fc443dfb57b78c86a591b66"} Jan 30 00:22:08 crc kubenswrapper[5113]: I0130 00:22:08.245676 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrmfc" event={"ID":"9b9b5244-7804-41cd-b34a-7acaaa9b89e1","Type":"ContainerStarted","Data":"3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17"} Jan 30 00:22:09 crc kubenswrapper[5113]: I0130 00:22:09.256597 5113 generic.go:358] "Generic (PLEG): container finished" podID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerID="3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17" exitCode=0 Jan 30 00:22:09 crc kubenswrapper[5113]: I0130 00:22:09.258429 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrmfc" event={"ID":"9b9b5244-7804-41cd-b34a-7acaaa9b89e1","Type":"ContainerDied","Data":"3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17"} Jan 30 00:22:09 crc kubenswrapper[5113]: I0130 00:22:09.857138 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495542-n5nbw" Jan 30 00:22:10 crc kubenswrapper[5113]: I0130 00:22:10.036214 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmg6s\" (UniqueName: \"kubernetes.io/projected/42c94cc3-978a-4e0c-824f-07666143914d-kube-api-access-zmg6s\") pod \"42c94cc3-978a-4e0c-824f-07666143914d\" (UID: \"42c94cc3-978a-4e0c-824f-07666143914d\") " Jan 30 00:22:10 crc kubenswrapper[5113]: I0130 00:22:10.053784 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42c94cc3-978a-4e0c-824f-07666143914d-kube-api-access-zmg6s" (OuterVolumeSpecName: "kube-api-access-zmg6s") pod "42c94cc3-978a-4e0c-824f-07666143914d" (UID: "42c94cc3-978a-4e0c-824f-07666143914d"). InnerVolumeSpecName "kube-api-access-zmg6s". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:22:10 crc kubenswrapper[5113]: I0130 00:22:10.139100 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-zmg6s\" (UniqueName: \"kubernetes.io/projected/42c94cc3-978a-4e0c-824f-07666143914d-kube-api-access-zmg6s\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:10 crc kubenswrapper[5113]: I0130 00:22:10.266957 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495542-n5nbw" Jan 30 00:22:10 crc kubenswrapper[5113]: I0130 00:22:10.266992 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495542-n5nbw" event={"ID":"42c94cc3-978a-4e0c-824f-07666143914d","Type":"ContainerDied","Data":"a2fac522afdc1a43ba6bef7bdc4a8855aff21d837ab1bbc4afe98971aad7862b"} Jan 30 00:22:10 crc kubenswrapper[5113]: I0130 00:22:10.267065 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2fac522afdc1a43ba6bef7bdc4a8855aff21d837ab1bbc4afe98971aad7862b" Jan 30 00:22:10 crc kubenswrapper[5113]: I0130 00:22:10.268820 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644" event={"ID":"fed9562a-0a07-46b0-8f10-9d3614e20db3","Type":"ContainerStarted","Data":"185a7780edcd27cf925f0e37a02b2adaedb402d9f71a45dfd9ea92a420d41a39"} Jan 30 00:22:10 crc kubenswrapper[5113]: I0130 00:22:10.272121 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrmfc" event={"ID":"9b9b5244-7804-41cd-b34a-7acaaa9b89e1","Type":"ContainerStarted","Data":"8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97"} Jan 30 00:22:10 crc kubenswrapper[5113]: I0130 00:22:10.297203 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-7c5b8bd68-94644" podStartSLOduration=3.410838437 podStartE2EDuration="8.297178351s" podCreationTimestamp="2026-01-30 00:22:02 +0000 UTC" firstStartedPulling="2026-01-30 00:22:05.008071728 +0000 UTC m=+745.080677105" lastFinishedPulling="2026-01-30 00:22:09.894411632 +0000 UTC m=+749.967017019" observedRunningTime="2026-01-30 00:22:10.295454989 +0000 UTC m=+750.368060376" watchObservedRunningTime="2026-01-30 00:22:10.297178351 +0000 UTC m=+750.369783738" Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.687281 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mrmfc" podStartSLOduration=17.735321341 podStartE2EDuration="18.687253847s" podCreationTimestamp="2026-01-30 00:21:54 +0000 UTC" firstStartedPulling="2026-01-30 00:22:06.194146418 +0000 UTC m=+746.266751795" lastFinishedPulling="2026-01-30 00:22:07.146078924 +0000 UTC m=+747.218684301" observedRunningTime="2026-01-30 00:22:10.32796542 +0000 UTC m=+750.400570797" watchObservedRunningTime="2026-01-30 00:22:12.687253847 +0000 UTC m=+752.759859224" Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.689616 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-597b96b99b-dzs4m"] Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.690324 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="42c94cc3-978a-4e0c-824f-07666143914d" containerName="oc" Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.690344 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="42c94cc3-978a-4e0c-824f-07666143914d" containerName="oc" Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.690475 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="42c94cc3-978a-4e0c-824f-07666143914d" containerName="oc" Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.802096 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.805459 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"cert-manager\"/\"kube-root-ca.crt\"" Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.805604 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"cert-manager\"/\"openshift-service-ca.crt\"" Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.805686 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"cert-manager\"/\"cert-manager-webhook-dockercfg-w5xsk\"" Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.810682 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-597b96b99b-dzs4m"] Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.984359 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e6d68b87-9e39-420a-bc88-c3bc641ff82e-bound-sa-token\") pod \"cert-manager-webhook-597b96b99b-dzs4m\" (UID: \"e6d68b87-9e39-420a-bc88-c3bc641ff82e\") " pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" Jan 30 00:22:12 crc kubenswrapper[5113]: I0130 00:22:12.985653 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdc7h\" (UniqueName: \"kubernetes.io/projected/e6d68b87-9e39-420a-bc88-c3bc641ff82e-kube-api-access-xdc7h\") pod \"cert-manager-webhook-597b96b99b-dzs4m\" (UID: \"e6d68b87-9e39-420a-bc88-c3bc641ff82e\") " pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" Jan 30 00:22:13 crc kubenswrapper[5113]: I0130 00:22:13.087293 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-xdc7h\" (UniqueName: \"kubernetes.io/projected/e6d68b87-9e39-420a-bc88-c3bc641ff82e-kube-api-access-xdc7h\") pod \"cert-manager-webhook-597b96b99b-dzs4m\" (UID: \"e6d68b87-9e39-420a-bc88-c3bc641ff82e\") " pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" Jan 30 00:22:13 crc kubenswrapper[5113]: I0130 00:22:13.087801 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e6d68b87-9e39-420a-bc88-c3bc641ff82e-bound-sa-token\") pod \"cert-manager-webhook-597b96b99b-dzs4m\" (UID: \"e6d68b87-9e39-420a-bc88-c3bc641ff82e\") " pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" Jan 30 00:22:13 crc kubenswrapper[5113]: I0130 00:22:13.119030 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e6d68b87-9e39-420a-bc88-c3bc641ff82e-bound-sa-token\") pod \"cert-manager-webhook-597b96b99b-dzs4m\" (UID: \"e6d68b87-9e39-420a-bc88-c3bc641ff82e\") " pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" Jan 30 00:22:13 crc kubenswrapper[5113]: I0130 00:22:13.136387 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdc7h\" (UniqueName: \"kubernetes.io/projected/e6d68b87-9e39-420a-bc88-c3bc641ff82e-kube-api-access-xdc7h\") pod \"cert-manager-webhook-597b96b99b-dzs4m\" (UID: \"e6d68b87-9e39-420a-bc88-c3bc641ff82e\") " pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" Jan 30 00:22:13 crc kubenswrapper[5113]: I0130 00:22:13.422839 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" Jan 30 00:22:13 crc kubenswrapper[5113]: I0130 00:22:13.740384 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-597b96b99b-dzs4m"] Jan 30 00:22:14 crc kubenswrapper[5113]: I0130 00:22:14.302836 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" event={"ID":"e6d68b87-9e39-420a-bc88-c3bc641ff82e","Type":"ContainerStarted","Data":"199dd96cdaae3bcfea3ac062d3d7920eca1207ce8d7a3ba45c538a46653dc2f9"} Jan 30 00:22:14 crc kubenswrapper[5113]: I0130 00:22:14.703533 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:22:14 crc kubenswrapper[5113]: I0130 00:22:14.704668 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:22:15 crc kubenswrapper[5113]: I0130 00:22:15.761735 5113 prober.go:120] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mrmfc" podUID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerName="registry-server" probeResult="failure" output=< Jan 30 00:22:15 crc kubenswrapper[5113]: timeout: failed to connect service ":50051" within 1s Jan 30 00:22:15 crc kubenswrapper[5113]: > Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.198401 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-669c9f96b5-4c4gl" Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.384080 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-8966b78d4-4r2ng"] Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.546161 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-8966b78d4-4r2ng"] Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.546379 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-8966b78d4-4r2ng" Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.549961 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"cert-manager\"/\"cert-manager-cainjector-dockercfg-9mgg6\"" Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.645495 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f19fa386-988c-4eab-8cb4-383aa28e594b-bound-sa-token\") pod \"cert-manager-cainjector-8966b78d4-4r2ng\" (UID: \"f19fa386-988c-4eab-8cb4-383aa28e594b\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4r2ng" Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.645649 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fz9k4\" (UniqueName: \"kubernetes.io/projected/f19fa386-988c-4eab-8cb4-383aa28e594b-kube-api-access-fz9k4\") pod \"cert-manager-cainjector-8966b78d4-4r2ng\" (UID: \"f19fa386-988c-4eab-8cb4-383aa28e594b\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4r2ng" Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.747194 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f19fa386-988c-4eab-8cb4-383aa28e594b-bound-sa-token\") pod \"cert-manager-cainjector-8966b78d4-4r2ng\" (UID: \"f19fa386-988c-4eab-8cb4-383aa28e594b\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4r2ng" Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.747265 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-fz9k4\" (UniqueName: \"kubernetes.io/projected/f19fa386-988c-4eab-8cb4-383aa28e594b-kube-api-access-fz9k4\") pod \"cert-manager-cainjector-8966b78d4-4r2ng\" (UID: \"f19fa386-988c-4eab-8cb4-383aa28e594b\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4r2ng" Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.782047 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-fz9k4\" (UniqueName: \"kubernetes.io/projected/f19fa386-988c-4eab-8cb4-383aa28e594b-kube-api-access-fz9k4\") pod \"cert-manager-cainjector-8966b78d4-4r2ng\" (UID: \"f19fa386-988c-4eab-8cb4-383aa28e594b\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4r2ng" Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.782838 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f19fa386-988c-4eab-8cb4-383aa28e594b-bound-sa-token\") pod \"cert-manager-cainjector-8966b78d4-4r2ng\" (UID: \"f19fa386-988c-4eab-8cb4-383aa28e594b\") " pod="cert-manager/cert-manager-cainjector-8966b78d4-4r2ng" Jan 30 00:22:16 crc kubenswrapper[5113]: I0130 00:22:16.877291 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-8966b78d4-4r2ng" Jan 30 00:22:17 crc kubenswrapper[5113]: E0130 00:22:17.536138 5113 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb" Jan 30 00:22:17 crc kubenswrapper[5113]: E0130 00:22:17.536424 5113 kuberuntime_manager.go:1358] "Unhandled Error" err="init container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pxmz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46_openshift-marketplace(37e551ce-ff20-486b-986a-429cb060e341): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError" Jan 30 00:22:17 crc kubenswrapper[5113]: E0130 00:22:17.537720 5113 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:22:17 crc kubenswrapper[5113]: I0130 00:22:17.626313 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-8966b78d4-4r2ng"] Jan 30 00:22:17 crc kubenswrapper[5113]: W0130 00:22:17.655129 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf19fa386_988c_4eab_8cb4_383aa28e594b.slice/crio-6a21ca65bdc1531ac6443faa28ab72a32217b891671f512045a9a05e0470677f WatchSource:0}: Error finding container 6a21ca65bdc1531ac6443faa28ab72a32217b891671f512045a9a05e0470677f: Status 404 returned error can't find the container with id 6a21ca65bdc1531ac6443faa28ab72a32217b891671f512045a9a05e0470677f Jan 30 00:22:18 crc kubenswrapper[5113]: I0130 00:22:18.340209 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-8966b78d4-4r2ng" event={"ID":"f19fa386-988c-4eab-8cb4-383aa28e594b","Type":"ContainerStarted","Data":"6a21ca65bdc1531ac6443faa28ab72a32217b891671f512045a9a05e0470677f"} Jan 30 00:22:21 crc kubenswrapper[5113]: I0130 00:22:21.197092 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:22:21 crc kubenswrapper[5113]: I0130 00:22:21.197827 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:22:22 crc kubenswrapper[5113]: I0130 00:22:22.375841 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-8966b78d4-4r2ng" event={"ID":"f19fa386-988c-4eab-8cb4-383aa28e594b","Type":"ContainerStarted","Data":"a044866738ee48bfa646b5bceb5b794fd4adbd0eb45c40a2cf1b60a6c010d6f6"} Jan 30 00:22:22 crc kubenswrapper[5113]: I0130 00:22:22.377672 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" event={"ID":"e6d68b87-9e39-420a-bc88-c3bc641ff82e","Type":"ContainerStarted","Data":"547bbbb141642cb1a09790f5448e77bf993a6f82e3ff77763c29f4287eec0ef0"} Jan 30 00:22:22 crc kubenswrapper[5113]: I0130 00:22:22.377805 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" Jan 30 00:22:22 crc kubenswrapper[5113]: I0130 00:22:22.432294 
5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-8966b78d4-4r2ng" podStartSLOduration=2.042897589 podStartE2EDuration="6.432273129s" podCreationTimestamp="2026-01-30 00:22:16 +0000 UTC" firstStartedPulling="2026-01-30 00:22:17.663435762 +0000 UTC m=+757.736041129" lastFinishedPulling="2026-01-30 00:22:22.052811292 +0000 UTC m=+762.125416669" observedRunningTime="2026-01-30 00:22:22.429367301 +0000 UTC m=+762.501972678" watchObservedRunningTime="2026-01-30 00:22:22.432273129 +0000 UTC m=+762.504878506" Jan 30 00:22:22 crc kubenswrapper[5113]: I0130 00:22:22.459421 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" podStartSLOduration=2.179286423 podStartE2EDuration="10.459392037s" podCreationTimestamp="2026-01-30 00:22:12 +0000 UTC" firstStartedPulling="2026-01-30 00:22:13.757104921 +0000 UTC m=+753.829710298" lastFinishedPulling="2026-01-30 00:22:22.037210535 +0000 UTC m=+762.109815912" observedRunningTime="2026-01-30 00:22:22.456366545 +0000 UTC m=+762.528971922" watchObservedRunningTime="2026-01-30 00:22:22.459392037 +0000 UTC m=+762.531997414" Jan 30 00:22:24 crc kubenswrapper[5113]: I0130 00:22:24.782991 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:22:24 crc kubenswrapper[5113]: I0130 00:22:24.828055 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:22:25 crc kubenswrapper[5113]: I0130 00:22:25.556067 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mrmfc"] Jan 30 00:22:26 crc kubenswrapper[5113]: I0130 00:22:26.407921 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mrmfc" podUID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerName="registry-server" containerID="cri-o://8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97" gracePeriod=2 Jan 30 00:22:26 crc kubenswrapper[5113]: I0130 00:22:26.862561 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:22:26 crc kubenswrapper[5113]: I0130 00:22:26.937426 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-catalog-content\") pod \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " Jan 30 00:22:26 crc kubenswrapper[5113]: I0130 00:22:26.937775 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q97p8\" (UniqueName: \"kubernetes.io/projected/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-kube-api-access-q97p8\") pod \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " Jan 30 00:22:26 crc kubenswrapper[5113]: I0130 00:22:26.937830 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-utilities\") pod \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\" (UID: \"9b9b5244-7804-41cd-b34a-7acaaa9b89e1\") " Jan 30 00:22:26 crc kubenswrapper[5113]: I0130 00:22:26.939931 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-utilities" (OuterVolumeSpecName: "utilities") pod "9b9b5244-7804-41cd-b34a-7acaaa9b89e1" (UID: "9b9b5244-7804-41cd-b34a-7acaaa9b89e1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:26 crc kubenswrapper[5113]: I0130 00:22:26.948000 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-kube-api-access-q97p8" (OuterVolumeSpecName: "kube-api-access-q97p8") pod "9b9b5244-7804-41cd-b34a-7acaaa9b89e1" (UID: "9b9b5244-7804-41cd-b34a-7acaaa9b89e1"). InnerVolumeSpecName "kube-api-access-q97p8". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.040141 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-q97p8\" (UniqueName: \"kubernetes.io/projected/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-kube-api-access-q97p8\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.040509 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.044084 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9b9b5244-7804-41cd-b34a-7acaaa9b89e1" (UID: "9b9b5244-7804-41cd-b34a-7acaaa9b89e1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.142508 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b9b5244-7804-41cd-b34a-7acaaa9b89e1-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.418883 5113 generic.go:358] "Generic (PLEG): container finished" podID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerID="8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97" exitCode=0 Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.419023 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mrmfc" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.419017 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrmfc" event={"ID":"9b9b5244-7804-41cd-b34a-7acaaa9b89e1","Type":"ContainerDied","Data":"8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97"} Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.421024 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mrmfc" event={"ID":"9b9b5244-7804-41cd-b34a-7acaaa9b89e1","Type":"ContainerDied","Data":"8765cf6293941ccb6fd920b5e7178d2c900a51a08bc84a451af7d9302506af66"} Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.421103 5113 scope.go:117] "RemoveContainer" containerID="8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.446351 5113 scope.go:117] "RemoveContainer" containerID="3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.483028 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mrmfc"] Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.486512 5113 scope.go:117] "RemoveContainer" containerID="782acbc62466e32b96651c288e979d55f66d9997e7e7285977897e7dfc90daca" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.488507 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mrmfc"] Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.511602 5113 scope.go:117] "RemoveContainer" containerID="8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97" Jan 30 00:22:27 crc kubenswrapper[5113]: E0130 00:22:27.512131 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97\": container with ID starting with 8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97 not found: ID does not exist" containerID="8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.512192 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97"} err="failed to get container status \"8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97\": rpc error: code = NotFound desc = could not find container \"8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97\": container with ID starting with 8d032a2ab815ab67a7e1826b89443a4298f76a5cd8a55bca804ff1c43523ad97 not found: ID does not exist" Jan 30 00:22:27 crc 
kubenswrapper[5113]: I0130 00:22:27.512231 5113 scope.go:117] "RemoveContainer" containerID="3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17" Jan 30 00:22:27 crc kubenswrapper[5113]: E0130 00:22:27.512863 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17\": container with ID starting with 3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17 not found: ID does not exist" containerID="3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.512920 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17"} err="failed to get container status \"3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17\": rpc error: code = NotFound desc = could not find container \"3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17\": container with ID starting with 3f7fa8ca05e7ea154a12921769645d1629dbf360393e2a89b8d698e97fcb1a17 not found: ID does not exist" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.512957 5113 scope.go:117] "RemoveContainer" containerID="782acbc62466e32b96651c288e979d55f66d9997e7e7285977897e7dfc90daca" Jan 30 00:22:27 crc kubenswrapper[5113]: E0130 00:22:27.513479 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"782acbc62466e32b96651c288e979d55f66d9997e7e7285977897e7dfc90daca\": container with ID starting with 782acbc62466e32b96651c288e979d55f66d9997e7e7285977897e7dfc90daca not found: ID does not exist" containerID="782acbc62466e32b96651c288e979d55f66d9997e7e7285977897e7dfc90daca" Jan 30 00:22:27 crc kubenswrapper[5113]: I0130 00:22:27.513566 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"782acbc62466e32b96651c288e979d55f66d9997e7e7285977897e7dfc90daca"} err="failed to get container status \"782acbc62466e32b96651c288e979d55f66d9997e7e7285977897e7dfc90daca\": rpc error: code = NotFound desc = could not find container \"782acbc62466e32b96651c288e979d55f66d9997e7e7285977897e7dfc90daca\": container with ID starting with 782acbc62466e32b96651c288e979d55f66d9997e7e7285977897e7dfc90daca not found: ID does not exist" Jan 30 00:22:28 crc kubenswrapper[5113]: I0130 00:22:28.388568 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-597b96b99b-dzs4m" Jan 30 00:22:28 crc kubenswrapper[5113]: I0130 00:22:28.785144 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" path="/var/lib/kubelet/pods/9b9b5244-7804-41cd-b34a-7acaaa9b89e1/volumes" Jan 30 00:22:30 crc kubenswrapper[5113]: E0130 00:22:30.785953 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": 
dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.840658 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-759f64656b-q8d2f"] Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.841611 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerName="extract-content" Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.841631 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerName="extract-content" Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.841645 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerName="extract-utilities" Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.841652 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerName="extract-utilities" Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.841684 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerName="registry-server" Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.841691 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerName="registry-server" Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.841916 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="9b9b5244-7804-41cd-b34a-7acaaa9b89e1" containerName="registry-server" Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.847186 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-759f64656b-q8d2f" Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.853020 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"cert-manager\"/\"cert-manager-dockercfg-72tbj\"" Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.858635 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-759f64656b-q8d2f"] Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.922966 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/76bccc71-5eff-4487-bf13-b106e73ff811-bound-sa-token\") pod \"cert-manager-759f64656b-q8d2f\" (UID: \"76bccc71-5eff-4487-bf13-b106e73ff811\") " pod="cert-manager/cert-manager-759f64656b-q8d2f" Jan 30 00:22:31 crc kubenswrapper[5113]: I0130 00:22:31.923039 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72rk7\" (UniqueName: \"kubernetes.io/projected/76bccc71-5eff-4487-bf13-b106e73ff811-kube-api-access-72rk7\") pod \"cert-manager-759f64656b-q8d2f\" (UID: \"76bccc71-5eff-4487-bf13-b106e73ff811\") " pod="cert-manager/cert-manager-759f64656b-q8d2f" Jan 30 00:22:32 crc kubenswrapper[5113]: I0130 00:22:32.024450 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-72rk7\" (UniqueName: \"kubernetes.io/projected/76bccc71-5eff-4487-bf13-b106e73ff811-kube-api-access-72rk7\") pod \"cert-manager-759f64656b-q8d2f\" (UID: \"76bccc71-5eff-4487-bf13-b106e73ff811\") " pod="cert-manager/cert-manager-759f64656b-q8d2f" Jan 30 00:22:32 crc kubenswrapper[5113]: I0130 00:22:32.025206 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/76bccc71-5eff-4487-bf13-b106e73ff811-bound-sa-token\") pod \"cert-manager-759f64656b-q8d2f\" (UID: \"76bccc71-5eff-4487-bf13-b106e73ff811\") " pod="cert-manager/cert-manager-759f64656b-q8d2f" Jan 30 00:22:32 crc kubenswrapper[5113]: I0130 00:22:32.049115 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/76bccc71-5eff-4487-bf13-b106e73ff811-bound-sa-token\") pod \"cert-manager-759f64656b-q8d2f\" (UID: \"76bccc71-5eff-4487-bf13-b106e73ff811\") " pod="cert-manager/cert-manager-759f64656b-q8d2f" Jan 30 00:22:32 crc kubenswrapper[5113]: I0130 00:22:32.049229 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-72rk7\" (UniqueName: \"kubernetes.io/projected/76bccc71-5eff-4487-bf13-b106e73ff811-kube-api-access-72rk7\") pod \"cert-manager-759f64656b-q8d2f\" (UID: \"76bccc71-5eff-4487-bf13-b106e73ff811\") " pod="cert-manager/cert-manager-759f64656b-q8d2f" Jan 30 00:22:32 crc kubenswrapper[5113]: I0130 00:22:32.170145 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-759f64656b-q8d2f" Jan 30 00:22:32 crc kubenswrapper[5113]: I0130 00:22:32.403573 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-759f64656b-q8d2f"] Jan 30 00:22:32 crc kubenswrapper[5113]: W0130 00:22:32.408679 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod76bccc71_5eff_4487_bf13_b106e73ff811.slice/crio-d79c99a57d785a18e25cea58ddaae02d58a7ae545059f8b2c20def414cca43e5 WatchSource:0}: Error finding container d79c99a57d785a18e25cea58ddaae02d58a7ae545059f8b2c20def414cca43e5: Status 404 returned error can't find the container with id d79c99a57d785a18e25cea58ddaae02d58a7ae545059f8b2c20def414cca43e5 Jan 30 00:22:32 crc kubenswrapper[5113]: I0130 00:22:32.461948 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-759f64656b-q8d2f" event={"ID":"76bccc71-5eff-4487-bf13-b106e73ff811","Type":"ContainerStarted","Data":"d79c99a57d785a18e25cea58ddaae02d58a7ae545059f8b2c20def414cca43e5"} Jan 30 00:22:33 crc kubenswrapper[5113]: I0130 00:22:33.474005 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-759f64656b-q8d2f" event={"ID":"76bccc71-5eff-4487-bf13-b106e73ff811","Type":"ContainerStarted","Data":"aeff48e7fbe3ee2eb16caa54a239ac4bc60d703e8cb0a7b764d1bbc422568378"} Jan 30 00:22:33 crc kubenswrapper[5113]: I0130 00:22:33.499052 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-759f64656b-q8d2f" podStartSLOduration=2.499020091 podStartE2EDuration="2.499020091s" podCreationTimestamp="2026-01-30 00:22:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 00:22:33.493327937 +0000 UTC m=+773.565933354" watchObservedRunningTime="2026-01-30 00:22:33.499020091 +0000 UTC m=+773.571625478" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.350706 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mzrsb"] Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.368619 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.392187 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mzrsb"] Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.462312 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-utilities\") pod \"community-operators-mzrsb\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.462384 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-catalog-content\") pod \"community-operators-mzrsb\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.462499 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzfhs\" (UniqueName: \"kubernetes.io/projected/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-kube-api-access-bzfhs\") pod \"community-operators-mzrsb\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.564066 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-utilities\") pod \"community-operators-mzrsb\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.564165 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-catalog-content\") pod \"community-operators-mzrsb\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.564213 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-bzfhs\" (UniqueName: \"kubernetes.io/projected/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-kube-api-access-bzfhs\") pod \"community-operators-mzrsb\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.564914 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-utilities\") pod \"community-operators-mzrsb\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.565092 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-catalog-content\") pod \"community-operators-mzrsb\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.591780 5113 operation_generator.go:615] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-bzfhs\" (UniqueName: \"kubernetes.io/projected/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-kube-api-access-bzfhs\") pod \"community-operators-mzrsb\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.728253 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:39 crc kubenswrapper[5113]: I0130 00:22:39.980994 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mzrsb"] Jan 30 00:22:40 crc kubenswrapper[5113]: I0130 00:22:40.534589 5113 generic.go:358] "Generic (PLEG): container finished" podID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" containerID="2755a27ae1206f14960f6091b9075816b15e2a22bed5581c5c7f627905ad4189" exitCode=0 Jan 30 00:22:40 crc kubenswrapper[5113]: I0130 00:22:40.534649 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzrsb" event={"ID":"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491","Type":"ContainerDied","Data":"2755a27ae1206f14960f6091b9075816b15e2a22bed5581c5c7f627905ad4189"} Jan 30 00:22:40 crc kubenswrapper[5113]: I0130 00:22:40.534919 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzrsb" event={"ID":"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491","Type":"ContainerStarted","Data":"17d7445c6a8955669c03421ca9e7c3619cbc5c7e826b2308f677893050bc3431"} Jan 30 00:22:41 crc kubenswrapper[5113]: I0130 00:22:41.545537 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzrsb" event={"ID":"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491","Type":"ContainerStarted","Data":"b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963"} Jan 30 00:22:41 crc kubenswrapper[5113]: E0130 00:22:41.775964 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:22:42 crc kubenswrapper[5113]: I0130 00:22:42.558289 5113 generic.go:358] "Generic (PLEG): container finished" podID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" containerID="b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963" exitCode=0 Jan 30 00:22:42 crc kubenswrapper[5113]: I0130 00:22:42.558493 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzrsb" 
event={"ID":"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491","Type":"ContainerDied","Data":"b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963"} Jan 30 00:22:43 crc kubenswrapper[5113]: I0130 00:22:43.568310 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzrsb" event={"ID":"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491","Type":"ContainerStarted","Data":"fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700"} Jan 30 00:22:43 crc kubenswrapper[5113]: I0130 00:22:43.595236 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mzrsb" podStartSLOduration=3.917219535 podStartE2EDuration="4.595215677s" podCreationTimestamp="2026-01-30 00:22:39 +0000 UTC" firstStartedPulling="2026-01-30 00:22:40.536495889 +0000 UTC m=+780.609101306" lastFinishedPulling="2026-01-30 00:22:41.214492071 +0000 UTC m=+781.287097448" observedRunningTime="2026-01-30 00:22:43.590317346 +0000 UTC m=+783.662922733" watchObservedRunningTime="2026-01-30 00:22:43.595215677 +0000 UTC m=+783.667821054" Jan 30 00:22:49 crc kubenswrapper[5113]: I0130 00:22:49.728997 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:49 crc kubenswrapper[5113]: I0130 00:22:49.729552 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:49 crc kubenswrapper[5113]: I0130 00:22:49.796909 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:50 crc kubenswrapper[5113]: I0130 00:22:50.684590 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:50 crc kubenswrapper[5113]: I0130 00:22:50.739247 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mzrsb"] Jan 30 00:22:51 crc kubenswrapper[5113]: I0130 00:22:51.195673 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:22:51 crc kubenswrapper[5113]: I0130 00:22:51.195766 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:22:52 crc kubenswrapper[5113]: I0130 00:22:52.650378 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mzrsb" podUID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" containerName="registry-server" containerID="cri-o://fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700" gracePeriod=2 Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.259911 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.393210 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzfhs\" (UniqueName: \"kubernetes.io/projected/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-kube-api-access-bzfhs\") pod \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.393331 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-catalog-content\") pod \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.393505 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-utilities\") pod \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\" (UID: \"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491\") " Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.395798 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-utilities" (OuterVolumeSpecName: "utilities") pod "8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" (UID: "8a1c3e83-e2d8-4e4e-a292-1eb97cd81491"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.405034 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-kube-api-access-bzfhs" (OuterVolumeSpecName: "kube-api-access-bzfhs") pod "8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" (UID: "8a1c3e83-e2d8-4e4e-a292-1eb97cd81491"). InnerVolumeSpecName "kube-api-access-bzfhs". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.459398 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" (UID: "8a1c3e83-e2d8-4e4e-a292-1eb97cd81491"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.495921 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.496167 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-bzfhs\" (UniqueName: \"kubernetes.io/projected/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-kube-api-access-bzfhs\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.496180 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.658505 5113 generic.go:358] "Generic (PLEG): container finished" podID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" containerID="fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700" exitCode=0 Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.658576 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzrsb" event={"ID":"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491","Type":"ContainerDied","Data":"fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700"} Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.658637 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mzrsb" event={"ID":"8a1c3e83-e2d8-4e4e-a292-1eb97cd81491","Type":"ContainerDied","Data":"17d7445c6a8955669c03421ca9e7c3619cbc5c7e826b2308f677893050bc3431"} Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.658661 5113 scope.go:117] "RemoveContainer" containerID="fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.658598 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mzrsb" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.681559 5113 scope.go:117] "RemoveContainer" containerID="b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.690091 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mzrsb"] Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.695059 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mzrsb"] Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.712745 5113 scope.go:117] "RemoveContainer" containerID="2755a27ae1206f14960f6091b9075816b15e2a22bed5581c5c7f627905ad4189" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.734405 5113 scope.go:117] "RemoveContainer" containerID="fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700" Jan 30 00:22:53 crc kubenswrapper[5113]: E0130 00:22:53.735073 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700\": container with ID starting with fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700 not found: ID does not exist" containerID="fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.735121 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700"} err="failed to get container status \"fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700\": rpc error: code = NotFound desc = could not find container \"fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700\": container with ID starting with fd9c68aca58ba0ea2dd75608a5fd43fdf2cc49d73b405d885bf9fb4dcf3e5700 not found: ID does not exist" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.735148 5113 scope.go:117] "RemoveContainer" containerID="b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963" Jan 30 00:22:53 crc kubenswrapper[5113]: E0130 00:22:53.736056 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963\": container with ID starting with b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963 not found: ID does not exist" containerID="b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.736128 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963"} err="failed to get container status \"b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963\": rpc error: code = NotFound desc = could not find container \"b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963\": container with ID starting with b85823ad6defddbff801a7291fe3ec6810d21fca718df617564b46eb8b5c2963 not found: ID does not exist" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.736170 5113 scope.go:117] "RemoveContainer" containerID="2755a27ae1206f14960f6091b9075816b15e2a22bed5581c5c7f627905ad4189" Jan 30 00:22:53 crc kubenswrapper[5113]: E0130 00:22:53.736776 5113 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"2755a27ae1206f14960f6091b9075816b15e2a22bed5581c5c7f627905ad4189\": container with ID starting with 2755a27ae1206f14960f6091b9075816b15e2a22bed5581c5c7f627905ad4189 not found: ID does not exist" containerID="2755a27ae1206f14960f6091b9075816b15e2a22bed5581c5c7f627905ad4189" Jan 30 00:22:53 crc kubenswrapper[5113]: I0130 00:22:53.736809 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2755a27ae1206f14960f6091b9075816b15e2a22bed5581c5c7f627905ad4189"} err="failed to get container status \"2755a27ae1206f14960f6091b9075816b15e2a22bed5581c5c7f627905ad4189\": rpc error: code = NotFound desc = could not find container \"2755a27ae1206f14960f6091b9075816b15e2a22bed5581c5c7f627905ad4189\": container with ID starting with 2755a27ae1206f14960f6091b9075816b15e2a22bed5581c5c7f627905ad4189 not found: ID does not exist" Jan 30 00:22:54 crc kubenswrapper[5113]: I0130 00:22:54.783815 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" path="/var/lib/kubelet/pods/8a1c3e83-e2d8-4e4e-a292-1eb97cd81491/volumes" Jan 30 00:22:56 crc kubenswrapper[5113]: E0130 00:22:56.775078 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:23:11 crc kubenswrapper[5113]: E0130 00:23:11.033652 5113 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb" Jan 30 00:23:11 crc kubenswrapper[5113]: E0130 00:23:11.035380 5113 kuberuntime_manager.go:1358] "Unhandled Error" err="init container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb 
/bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pxmz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46_openshift-marketplace(37e551ce-ff20-486b-986a-429cb060e341): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError"
Jan 30 00:23:11 crc kubenswrapper[5113]: E0130 00:23:11.036615 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:23:21 crc kubenswrapper[5113]: I0130 00:23:21.195340 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 00:23:21 crc kubenswrapper[5113]: I0130 00:23:21.196314 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 00:23:21 crc kubenswrapper[5113]: I0130 00:23:21.196415 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gxph5"
Jan 30 00:23:21 crc kubenswrapper[5113]: I0130 00:23:21.197324 5113 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"dbf3b234b28fc071e0fac78c4e03f35f6c4a815279840f68d3fec8f928bdd4c7"} pod="openshift-machine-config-operator/machine-config-daemon-gxph5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 30 00:23:21 crc kubenswrapper[5113]: I0130 00:23:21.197545 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" containerID="cri-o://dbf3b234b28fc071e0fac78c4e03f35f6c4a815279840f68d3fec8f928bdd4c7" gracePeriod=600
Jan 30 00:23:21 crc kubenswrapper[5113]: I0130 00:23:21.923785 5113 generic.go:358] "Generic (PLEG): container finished" podID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerID="dbf3b234b28fc071e0fac78c4e03f35f6c4a815279840f68d3fec8f928bdd4c7" exitCode=0
Jan 30 00:23:21 crc kubenswrapper[5113]: I0130 00:23:21.923984 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerDied","Data":"dbf3b234b28fc071e0fac78c4e03f35f6c4a815279840f68d3fec8f928bdd4c7"}
Jan 30 00:23:21 crc kubenswrapper[5113]: I0130 00:23:21.924295 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerStarted","Data":"c9c5d9da8823301bda322eade9ad07902d469bbebb6741898c8ea384844e2d83"}
Jan 30 00:23:21 crc kubenswrapper[5113]: I0130 00:23:21.924450 5113 scope.go:117] "RemoveContainer" containerID="d8d25d4044f296bffb6b6885c2a6830e6967a1638a903216ca33f356c73951bf"
Jan 30 00:23:23 crc kubenswrapper[5113]: E0130 00:23:23.778780 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:23:37 crc kubenswrapper[5113]: E0130 00:23:37.777352 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:23:50 crc kubenswrapper[5113]: E0130 00:23:50.786612 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.143056 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495544-mx26v"]
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.146050 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" containerName="extract-utilities"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.146185 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" containerName="extract-utilities"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.146280 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" containerName="registry-server"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.146334 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" containerName="registry-server"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.146390 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" containerName="extract-content"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.146459 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" containerName="extract-content"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.146689 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="8a1c3e83-e2d8-4e4e-a292-1eb97cd81491" containerName="registry-server"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.176474 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495544-mx26v"]
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.176737 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495544-mx26v"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.191749 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-kshml\""
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.192139 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\""
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.192296 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\""
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.320489 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5q9lw\" (UniqueName: \"kubernetes.io/projected/61b8e575-6023-481f-88d1-4ead3fb76864-kube-api-access-5q9lw\") pod \"auto-csr-approver-29495544-mx26v\" (UID: \"61b8e575-6023-481f-88d1-4ead3fb76864\") " pod="openshift-infra/auto-csr-approver-29495544-mx26v"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.422227 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-5q9lw\" (UniqueName: \"kubernetes.io/projected/61b8e575-6023-481f-88d1-4ead3fb76864-kube-api-access-5q9lw\") pod \"auto-csr-approver-29495544-mx26v\" (UID: \"61b8e575-6023-481f-88d1-4ead3fb76864\") " pod="openshift-infra/auto-csr-approver-29495544-mx26v"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.454410 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-5q9lw\" (UniqueName: \"kubernetes.io/projected/61b8e575-6023-481f-88d1-4ead3fb76864-kube-api-access-5q9lw\") pod \"auto-csr-approver-29495544-mx26v\" (UID: \"61b8e575-6023-481f-88d1-4ead3fb76864\") " pod="openshift-infra/auto-csr-approver-29495544-mx26v"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.517004 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495544-mx26v"
Jan 30 00:24:00 crc kubenswrapper[5113]: I0130 00:24:00.783813 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495544-mx26v"]
Jan 30 00:24:00 crc kubenswrapper[5113]: W0130 00:24:00.791691 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod61b8e575_6023_481f_88d1_4ead3fb76864.slice/crio-b668fd4a06072c7f81bd9c0476ef5eb7dcd239f83ec40a0c3fc0150a0331a4e2 WatchSource:0}: Error finding container b668fd4a06072c7f81bd9c0476ef5eb7dcd239f83ec40a0c3fc0150a0331a4e2: Status 404 returned error can't find the container with id b668fd4a06072c7f81bd9c0476ef5eb7dcd239f83ec40a0c3fc0150a0331a4e2
Jan 30 00:24:01 crc kubenswrapper[5113]: I0130 00:24:01.252818 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495544-mx26v" event={"ID":"61b8e575-6023-481f-88d1-4ead3fb76864","Type":"ContainerStarted","Data":"b668fd4a06072c7f81bd9c0476ef5eb7dcd239f83ec40a0c3fc0150a0331a4e2"}
Jan 30 00:24:02 crc kubenswrapper[5113]: I0130 00:24:02.261712 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495544-mx26v" event={"ID":"61b8e575-6023-481f-88d1-4ead3fb76864","Type":"ContainerStarted","Data":"527a3c407c72c80d5fc5511e63985368838a916d571fb649fd2aeaa8aaf2e565"}
Jan 30 00:24:02 crc kubenswrapper[5113]: I0130 00:24:02.289898 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-infra/auto-csr-approver-29495544-mx26v" podStartSLOduration=1.398976414 podStartE2EDuration="2.289874924s" podCreationTimestamp="2026-01-30 00:24:00 +0000 UTC" firstStartedPulling="2026-01-30 00:24:00.793796704 +0000 UTC m=+860.866402081" lastFinishedPulling="2026-01-30 00:24:01.684695214 +0000 UTC m=+861.757300591" observedRunningTime="2026-01-30 00:24:02.28295182 +0000 UTC m=+862.355557207" watchObservedRunningTime="2026-01-30 00:24:02.289874924 +0000 UTC m=+862.362480301"
Jan 30 00:24:02 crc kubenswrapper[5113]: E0130 00:24:02.775834 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:24:03 crc kubenswrapper[5113]: I0130 00:24:03.273462 5113 generic.go:358] "Generic (PLEG): container finished" podID="61b8e575-6023-481f-88d1-4ead3fb76864" containerID="527a3c407c72c80d5fc5511e63985368838a916d571fb649fd2aeaa8aaf2e565" exitCode=0
Jan 30 00:24:03 crc kubenswrapper[5113]: I0130 00:24:03.273745 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495544-mx26v" event={"ID":"61b8e575-6023-481f-88d1-4ead3fb76864","Type":"ContainerDied","Data":"527a3c407c72c80d5fc5511e63985368838a916d571fb649fd2aeaa8aaf2e565"}
Jan 30 00:24:04 crc kubenswrapper[5113]: I0130 00:24:04.576625 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495544-mx26v"
Jan 30 00:24:04 crc kubenswrapper[5113]: I0130 00:24:04.707816 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5q9lw\" (UniqueName: \"kubernetes.io/projected/61b8e575-6023-481f-88d1-4ead3fb76864-kube-api-access-5q9lw\") pod \"61b8e575-6023-481f-88d1-4ead3fb76864\" (UID: \"61b8e575-6023-481f-88d1-4ead3fb76864\") "
Jan 30 00:24:04 crc kubenswrapper[5113]: I0130 00:24:04.717296 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61b8e575-6023-481f-88d1-4ead3fb76864-kube-api-access-5q9lw" (OuterVolumeSpecName: "kube-api-access-5q9lw") pod "61b8e575-6023-481f-88d1-4ead3fb76864" (UID: "61b8e575-6023-481f-88d1-4ead3fb76864"). InnerVolumeSpecName "kube-api-access-5q9lw". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:24:04 crc kubenswrapper[5113]: I0130 00:24:04.810145 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-5q9lw\" (UniqueName: \"kubernetes.io/projected/61b8e575-6023-481f-88d1-4ead3fb76864-kube-api-access-5q9lw\") on node \"crc\" DevicePath \"\""
Jan 30 00:24:05 crc kubenswrapper[5113]: I0130 00:24:05.245824 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29495538-dkktd"]
Jan 30 00:24:05 crc kubenswrapper[5113]: I0130 00:24:05.250011 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29495538-dkktd"]
Jan 30 00:24:05 crc kubenswrapper[5113]: I0130 00:24:05.296306 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495544-mx26v"
Jan 30 00:24:05 crc kubenswrapper[5113]: I0130 00:24:05.296571 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495544-mx26v" event={"ID":"61b8e575-6023-481f-88d1-4ead3fb76864","Type":"ContainerDied","Data":"b668fd4a06072c7f81bd9c0476ef5eb7dcd239f83ec40a0c3fc0150a0331a4e2"}
Jan 30 00:24:05 crc kubenswrapper[5113]: I0130 00:24:05.296629 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b668fd4a06072c7f81bd9c0476ef5eb7dcd239f83ec40a0c3fc0150a0331a4e2"
Jan 30 00:24:06 crc kubenswrapper[5113]: I0130 00:24:06.782393 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9044cac0-e686-4518-8879-3082b7842df6" path="/var/lib/kubelet/pods/9044cac0-e686-4518-8879-3082b7842df6/volumes"
Jan 30 00:24:14 crc kubenswrapper[5113]: E0130 00:24:14.778582 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:24:25 crc kubenswrapper[5113]: E0130 00:24:25.777421 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:24:37 crc kubenswrapper[5113]: E0130 00:24:37.021296 5113 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb"
Jan 30 00:24:37 crc kubenswrapper[5113]: E0130 00:24:37.022635 5113 kuberuntime_manager.go:1358] "Unhandled Error" err="init container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pxmz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46_openshift-marketplace(37e551ce-ff20-486b-986a-429cb060e341): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError"
Jan 30 00:24:37 crc kubenswrapper[5113]: E0130 00:24:37.024023 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:24:41 crc kubenswrapper[5113]: I0130 00:24:41.120746 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log"
Jan 30 00:24:41 crc kubenswrapper[5113]: I0130 00:24:41.121181 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log"
Jan 30 00:24:41 crc kubenswrapper[5113]: I0130 00:24:41.166221 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mbd62_8ad95d7b-7c01-4672-8614-0cc8e52c0d79/kube-multus/0.log"
Jan 30 00:24:41 crc kubenswrapper[5113]: I0130 00:24:41.166932 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mbd62_8ad95d7b-7c01-4672-8614-0cc8e52c0d79/kube-multus/0.log"
Jan 30 00:24:41 crc kubenswrapper[5113]: I0130 00:24:41.170925 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log"
Jan 30 00:24:41 crc kubenswrapper[5113]: I0130 00:24:41.171389 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log"
Jan 30 00:24:42 crc kubenswrapper[5113]: I0130 00:24:42.030242 5113 scope.go:117] "RemoveContainer" containerID="53347a45a6019c6232fb5f7a7ce5f43115e2d60235df398e2567b1a988e926da"
Jan 30 00:24:48 crc kubenswrapper[5113]: E0130 00:24:48.777369 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:24:50 crc kubenswrapper[5113]: I0130 00:24:50.873802 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-must-gather-bs6s9/must-gather-r5v74"]
Jan 30 00:24:50 crc kubenswrapper[5113]: I0130 00:24:50.874921 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="61b8e575-6023-481f-88d1-4ead3fb76864" containerName="oc"
Jan 30 00:24:50 crc kubenswrapper[5113]: I0130 00:24:50.874937 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="61b8e575-6023-481f-88d1-4ead3fb76864" containerName="oc"
Jan 30 00:24:50 crc kubenswrapper[5113]: I0130 00:24:50.875081 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="61b8e575-6023-481f-88d1-4ead3fb76864" containerName="oc"
Jan 30 00:24:50 crc kubenswrapper[5113]: I0130 00:24:50.880166 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bs6s9/must-gather-r5v74"
Jan 30 00:24:50 crc kubenswrapper[5113]: I0130 00:24:50.884753 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-must-gather-bs6s9\"/\"default-dockercfg-srvhc\""
Jan 30 00:24:50 crc kubenswrapper[5113]: I0130 00:24:50.885056 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-must-gather-bs6s9\"/\"openshift-service-ca.crt\""
Jan 30 00:24:50 crc kubenswrapper[5113]: I0130 00:24:50.885387 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-must-gather-bs6s9\"/\"kube-root-ca.crt\""
Jan 30 00:24:50 crc kubenswrapper[5113]: I0130 00:24:50.892299 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-bs6s9/must-gather-r5v74"]
Jan 30 00:24:50 crc kubenswrapper[5113]: I0130 00:24:50.980313 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5fvx\" (UniqueName: \"kubernetes.io/projected/e86f32f3-00ea-4ada-b7ed-be7376c43705-kube-api-access-v5fvx\") pod \"must-gather-r5v74\" (UID: \"e86f32f3-00ea-4ada-b7ed-be7376c43705\") " pod="openshift-must-gather-bs6s9/must-gather-r5v74"
Jan 30 00:24:50 crc kubenswrapper[5113]: I0130 00:24:50.980428 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e86f32f3-00ea-4ada-b7ed-be7376c43705-must-gather-output\") pod \"must-gather-r5v74\" (UID: \"e86f32f3-00ea-4ada-b7ed-be7376c43705\") " pod="openshift-must-gather-bs6s9/must-gather-r5v74"
Jan 30 00:24:51 crc kubenswrapper[5113]: I0130 00:24:51.081956 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-v5fvx\" (UniqueName: \"kubernetes.io/projected/e86f32f3-00ea-4ada-b7ed-be7376c43705-kube-api-access-v5fvx\") pod \"must-gather-r5v74\" (UID: \"e86f32f3-00ea-4ada-b7ed-be7376c43705\") " pod="openshift-must-gather-bs6s9/must-gather-r5v74"
Jan 30 00:24:51 crc kubenswrapper[5113]: I0130 00:24:51.082053 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e86f32f3-00ea-4ada-b7ed-be7376c43705-must-gather-output\") pod \"must-gather-r5v74\" (UID: \"e86f32f3-00ea-4ada-b7ed-be7376c43705\") " pod="openshift-must-gather-bs6s9/must-gather-r5v74"
Jan 30 00:24:51 crc kubenswrapper[5113]: I0130 00:24:51.082952 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e86f32f3-00ea-4ada-b7ed-be7376c43705-must-gather-output\") pod \"must-gather-r5v74\" (UID: \"e86f32f3-00ea-4ada-b7ed-be7376c43705\") " pod="openshift-must-gather-bs6s9/must-gather-r5v74"
Jan 30 00:24:51 crc kubenswrapper[5113]: I0130 00:24:51.107363 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5fvx\" (UniqueName: \"kubernetes.io/projected/e86f32f3-00ea-4ada-b7ed-be7376c43705-kube-api-access-v5fvx\") pod \"must-gather-r5v74\" (UID: \"e86f32f3-00ea-4ada-b7ed-be7376c43705\") " pod="openshift-must-gather-bs6s9/must-gather-r5v74"
Jan 30 00:24:51 crc kubenswrapper[5113]: I0130 00:24:51.207884 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-bs6s9/must-gather-r5v74"
Jan 30 00:24:51 crc kubenswrapper[5113]: I0130 00:24:51.566421 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-bs6s9/must-gather-r5v74"]
Jan 30 00:24:51 crc kubenswrapper[5113]: I0130 00:24:51.683310 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bs6s9/must-gather-r5v74" event={"ID":"e86f32f3-00ea-4ada-b7ed-be7376c43705","Type":"ContainerStarted","Data":"e09d08de17b11c66026effc91372e40de3d0f76bfb442f6acd8716506988e680"}
Jan 30 00:24:57 crc kubenswrapper[5113]: I0130 00:24:57.733822 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bs6s9/must-gather-r5v74" event={"ID":"e86f32f3-00ea-4ada-b7ed-be7376c43705","Type":"ContainerStarted","Data":"eccaec654d26e6fd0ddba0e3efd7308740ab2e4b375eca624fb2344ac7cac63f"}
Jan 30 00:24:57 crc kubenswrapper[5113]: I0130 00:24:57.734287 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bs6s9/must-gather-r5v74" event={"ID":"e86f32f3-00ea-4ada-b7ed-be7376c43705","Type":"ContainerStarted","Data":"de2f9a0576aeadb6423b2954d3732784c671052e555256827a9230e40b127106"}
Jan 30 00:25:03 crc kubenswrapper[5113]: E0130 00:25:03.776824 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:25:03 crc kubenswrapper[5113]: I0130 00:25:03.816027 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-bs6s9/must-gather-r5v74" podStartSLOduration=8.457355775 podStartE2EDuration="13.816003558s" podCreationTimestamp="2026-01-30 00:24:50 +0000 UTC" firstStartedPulling="2026-01-30 00:24:51.577130329 +0000 UTC m=+911.649735716" lastFinishedPulling="2026-01-30 00:24:56.935778072 +0000 UTC m=+917.008383499" observedRunningTime="2026-01-30 00:24:57.759245622 +0000 UTC m=+917.831850999" watchObservedRunningTime="2026-01-30 00:25:03.816003558 +0000 UTC m=+923.888608935"
Jan 30 00:25:17 crc kubenswrapper[5113]: E0130 00:25:17.776499 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:25:21 crc kubenswrapper[5113]: I0130 00:25:21.196965 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 00:25:21 crc kubenswrapper[5113]: I0130 00:25:21.197942 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 00:25:28 crc kubenswrapper[5113]: I0130 00:25:28.775587 5113 provider.go:93] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 30 00:25:28 crc kubenswrapper[5113]: E0130 00:25:28.776771 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:25:34 crc kubenswrapper[5113]: I0130 00:25:34.825368 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bwhr6"]
Jan 30 00:25:34 crc kubenswrapper[5113]: I0130 00:25:34.843499 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bwhr6"]
Jan 30 00:25:34 crc kubenswrapper[5113]: I0130 00:25:34.843754 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:34 crc kubenswrapper[5113]: I0130 00:25:34.893151 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-catalog-content\") pod \"certified-operators-bwhr6\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") " pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:34 crc kubenswrapper[5113]: I0130 00:25:34.893239 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmrtc\" (UniqueName: \"kubernetes.io/projected/804b508d-8683-48c2-bd0f-3b254cbde7b2-kube-api-access-cmrtc\") pod \"certified-operators-bwhr6\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") " pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:34 crc kubenswrapper[5113]: I0130 00:25:34.893285 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-utilities\") pod \"certified-operators-bwhr6\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") " pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:34 crc kubenswrapper[5113]: I0130 00:25:34.995437 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-cmrtc\" (UniqueName: \"kubernetes.io/projected/804b508d-8683-48c2-bd0f-3b254cbde7b2-kube-api-access-cmrtc\") pod \"certified-operators-bwhr6\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") " pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:34 crc kubenswrapper[5113]: I0130 00:25:34.995592 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-utilities\") pod \"certified-operators-bwhr6\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") " pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:34 crc kubenswrapper[5113]: I0130 00:25:34.995644 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-catalog-content\") pod \"certified-operators-bwhr6\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") " pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:34 crc kubenswrapper[5113]: I0130 00:25:34.996243 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-utilities\") pod \"certified-operators-bwhr6\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") " pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:34 crc kubenswrapper[5113]: I0130 00:25:34.996309 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-catalog-content\") pod \"certified-operators-bwhr6\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") " pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:35 crc kubenswrapper[5113]: I0130 00:25:35.026805 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmrtc\" (UniqueName: \"kubernetes.io/projected/804b508d-8683-48c2-bd0f-3b254cbde7b2-kube-api-access-cmrtc\") pod \"certified-operators-bwhr6\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") " pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:35 crc kubenswrapper[5113]: I0130 00:25:35.165182 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:35 crc kubenswrapper[5113]: I0130 00:25:35.631486 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bwhr6"]
Jan 30 00:25:36 crc kubenswrapper[5113]: I0130 00:25:36.023208 5113 generic.go:358] "Generic (PLEG): container finished" podID="804b508d-8683-48c2-bd0f-3b254cbde7b2" containerID="7adb02f575bd1f206425127c4656d3670cc6726048ee069bf4e63b957670c0c5" exitCode=0
Jan 30 00:25:36 crc kubenswrapper[5113]: I0130 00:25:36.023325 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bwhr6" event={"ID":"804b508d-8683-48c2-bd0f-3b254cbde7b2","Type":"ContainerDied","Data":"7adb02f575bd1f206425127c4656d3670cc6726048ee069bf4e63b957670c0c5"}
Jan 30 00:25:36 crc kubenswrapper[5113]: I0130 00:25:36.024027 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bwhr6" event={"ID":"804b508d-8683-48c2-bd0f-3b254cbde7b2","Type":"ContainerStarted","Data":"a221c23ca6e19f0bbed39837fe70aa68a015503392dc7c2bb0f337f565698722"}
Jan 30 00:25:37 crc kubenswrapper[5113]: I0130 00:25:37.033830 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bwhr6" event={"ID":"804b508d-8683-48c2-bd0f-3b254cbde7b2","Type":"ContainerStarted","Data":"f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4"}
Jan 30 00:25:38 crc kubenswrapper[5113]: I0130 00:25:38.046315 5113 generic.go:358] "Generic (PLEG): container finished" podID="804b508d-8683-48c2-bd0f-3b254cbde7b2" containerID="f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4" exitCode=0
Jan 30 00:25:38 crc kubenswrapper[5113]: I0130 00:25:38.046495 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bwhr6" event={"ID":"804b508d-8683-48c2-bd0f-3b254cbde7b2","Type":"ContainerDied","Data":"f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4"}
Jan 30 00:25:39 crc kubenswrapper[5113]: I0130 00:25:39.056345 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bwhr6" event={"ID":"804b508d-8683-48c2-bd0f-3b254cbde7b2","Type":"ContainerStarted","Data":"df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd"}
Jan 30 00:25:39 crc kubenswrapper[5113]: I0130 00:25:39.079604 5113 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bwhr6" podStartSLOduration=4.395158188 podStartE2EDuration="5.079509088s" podCreationTimestamp="2026-01-30 00:25:34 +0000 UTC" firstStartedPulling="2026-01-30 00:25:36.024423865 +0000 UTC m=+956.097029232" lastFinishedPulling="2026-01-30 00:25:36.708774755 +0000 UTC m=+956.781380132" observedRunningTime="2026-01-30 00:25:39.078702654 +0000 UTC m=+959.151308031" watchObservedRunningTime="2026-01-30 00:25:39.079509088 +0000 UTC m=+959.152114465"
Jan 30 00:25:42 crc kubenswrapper[5113]: E0130 00:25:42.776553 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:25:43 crc kubenswrapper[5113]: I0130 00:25:43.421782 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-75ffdb6fcd-lxlzd_17999f27-0d6c-46f2-82b4-a07bec4b1021/control-plane-machine-set-operator/0.log"
Jan 30 00:25:43 crc kubenswrapper[5113]: I0130 00:25:43.562430 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-755bb95488-krhlw_0c156d1d-c9e6-43e7-b515-7a9314879127/kube-rbac-proxy/0.log"
Jan 30 00:25:43 crc kubenswrapper[5113]: I0130 00:25:43.627233 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-755bb95488-krhlw_0c156d1d-c9e6-43e7-b515-7a9314879127/machine-api-operator/0.log"
Jan 30 00:25:45 crc kubenswrapper[5113]: I0130 00:25:45.168896 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:45 crc kubenswrapper[5113]: I0130 00:25:45.169383 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="not ready" pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:45 crc kubenswrapper[5113]: I0130 00:25:45.231205 5113 kubelet.go:2658] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:46 crc kubenswrapper[5113]: I0130 00:25:46.170707 5113 kubelet.go:2658] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:46 crc kubenswrapper[5113]: I0130 00:25:46.233672 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bwhr6"]
Jan 30 00:25:48 crc kubenswrapper[5113]: I0130 00:25:48.130718 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bwhr6" podUID="804b508d-8683-48c2-bd0f-3b254cbde7b2" containerName="registry-server" containerID="cri-o://df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd" gracePeriod=2
Jan 30 00:25:48 crc kubenswrapper[5113]: I0130 00:25:48.507635 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:48 crc kubenswrapper[5113]: I0130 00:25:48.609728 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-utilities\") pod \"804b508d-8683-48c2-bd0f-3b254cbde7b2\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") "
Jan 30 00:25:48 crc kubenswrapper[5113]: I0130 00:25:48.610220 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-catalog-content\") pod \"804b508d-8683-48c2-bd0f-3b254cbde7b2\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") "
Jan 30 00:25:48 crc kubenswrapper[5113]: I0130 00:25:48.610257 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmrtc\" (UniqueName: \"kubernetes.io/projected/804b508d-8683-48c2-bd0f-3b254cbde7b2-kube-api-access-cmrtc\") pod \"804b508d-8683-48c2-bd0f-3b254cbde7b2\" (UID: \"804b508d-8683-48c2-bd0f-3b254cbde7b2\") "
Jan 30 00:25:48 crc kubenswrapper[5113]: I0130 00:25:48.613409 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-utilities" (OuterVolumeSpecName: "utilities") pod "804b508d-8683-48c2-bd0f-3b254cbde7b2" (UID: "804b508d-8683-48c2-bd0f-3b254cbde7b2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:25:48 crc kubenswrapper[5113]: I0130 00:25:48.629895 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/804b508d-8683-48c2-bd0f-3b254cbde7b2-kube-api-access-cmrtc" (OuterVolumeSpecName: "kube-api-access-cmrtc") pod "804b508d-8683-48c2-bd0f-3b254cbde7b2" (UID: "804b508d-8683-48c2-bd0f-3b254cbde7b2"). InnerVolumeSpecName "kube-api-access-cmrtc". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:25:48 crc kubenswrapper[5113]: I0130 00:25:48.652374 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "804b508d-8683-48c2-bd0f-3b254cbde7b2" (UID: "804b508d-8683-48c2-bd0f-3b254cbde7b2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGIDValue ""
Jan 30 00:25:48 crc kubenswrapper[5113]: I0130 00:25:48.714228 5113 reconciler_common.go:299] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-utilities\") on node \"crc\" DevicePath \"\""
Jan 30 00:25:48 crc kubenswrapper[5113]: I0130 00:25:48.714289 5113 reconciler_common.go:299] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/804b508d-8683-48c2-bd0f-3b254cbde7b2-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 30 00:25:48 crc kubenswrapper[5113]: I0130 00:25:48.714306 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-cmrtc\" (UniqueName: \"kubernetes.io/projected/804b508d-8683-48c2-bd0f-3b254cbde7b2-kube-api-access-cmrtc\") on node \"crc\" DevicePath \"\""
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.143556 5113 generic.go:358] "Generic (PLEG): container finished" podID="804b508d-8683-48c2-bd0f-3b254cbde7b2" containerID="df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd" exitCode=0
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.143740 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bwhr6" event={"ID":"804b508d-8683-48c2-bd0f-3b254cbde7b2","Type":"ContainerDied","Data":"df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd"}
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.143783 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bwhr6" event={"ID":"804b508d-8683-48c2-bd0f-3b254cbde7b2","Type":"ContainerDied","Data":"a221c23ca6e19f0bbed39837fe70aa68a015503392dc7c2bb0f337f565698722"}
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.143812 5113 scope.go:117] "RemoveContainer" containerID="df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd"
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.144053 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bwhr6"
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.178515 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bwhr6"]
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.185846 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bwhr6"]
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.189969 5113 scope.go:117] "RemoveContainer" containerID="f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4"
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.210583 5113 scope.go:117] "RemoveContainer" containerID="7adb02f575bd1f206425127c4656d3670cc6726048ee069bf4e63b957670c0c5"
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.245783 5113 scope.go:117] "RemoveContainer" containerID="df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd"
Jan 30 00:25:49 crc kubenswrapper[5113]: E0130 00:25:49.246851 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd\": container with ID starting with df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd not found: ID does not exist" containerID="df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd"
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.246925 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd"} err="failed to get container status \"df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd\": rpc error: code = NotFound desc = could not find container \"df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd\": container with ID starting with df4164cdf55434a20304078433b4bbd03dfcee17e3e36be30fc6c46dd4e25dfd not found: ID does not exist"
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.246973 5113 scope.go:117] "RemoveContainer" containerID="f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4"
Jan 30 00:25:49 crc kubenswrapper[5113]: E0130 00:25:49.247583 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4\": container with ID starting with f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4 not found: ID does not exist" containerID="f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4"
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.247625 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4"} err="failed to get container status \"f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4\": rpc error: code = NotFound desc = could not find container \"f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4\": container with ID starting with f12ba70d879d131af238a5b1043700cfaaa950d392014596ebb044607ec8ccf4 not found: ID does not exist"
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.247660 5113 scope.go:117] "RemoveContainer" containerID="7adb02f575bd1f206425127c4656d3670cc6726048ee069bf4e63b957670c0c5"
Jan 30 00:25:49 crc kubenswrapper[5113]: E0130 00:25:49.247953 5113 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7adb02f575bd1f206425127c4656d3670cc6726048ee069bf4e63b957670c0c5\": container with ID starting with 7adb02f575bd1f206425127c4656d3670cc6726048ee069bf4e63b957670c0c5 not found: ID does not exist" containerID="7adb02f575bd1f206425127c4656d3670cc6726048ee069bf4e63b957670c0c5"
Jan 30 00:25:49 crc kubenswrapper[5113]: I0130 00:25:49.248121 5113 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7adb02f575bd1f206425127c4656d3670cc6726048ee069bf4e63b957670c0c5"} err="failed to get container status \"7adb02f575bd1f206425127c4656d3670cc6726048ee069bf4e63b957670c0c5\": rpc error: code = NotFound desc = could not find container \"7adb02f575bd1f206425127c4656d3670cc6726048ee069bf4e63b957670c0c5\": container with ID starting with 7adb02f575bd1f206425127c4656d3670cc6726048ee069bf4e63b957670c0c5 not found: ID does not exist"
Jan 30 00:25:50 crc kubenswrapper[5113]: I0130 00:25:50.782209 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="804b508d-8683-48c2-bd0f-3b254cbde7b2" path="/var/lib/kubelet/pods/804b508d-8683-48c2-bd0f-3b254cbde7b2/volumes"
Jan 30 00:25:51 crc kubenswrapper[5113]: I0130 00:25:51.195668 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 00:25:51 crc kubenswrapper[5113]: I0130 00:25:51.195769 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 00:25:55 crc kubenswrapper[5113]: E0130 00:25:55.778076 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:25:56 crc kubenswrapper[5113]: I0130 00:25:56.418887 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-759f64656b-q8d2f_76bccc71-5eff-4487-bf13-b106e73ff811/cert-manager-controller/0.log"
Jan 30 00:25:56 crc kubenswrapper[5113]: I0130 00:25:56.617477 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-8966b78d4-4r2ng_f19fa386-988c-4eab-8cb4-383aa28e594b/cert-manager-cainjector/0.log"
Jan 30 00:25:56 crc kubenswrapper[5113]: I0130 00:25:56.659899 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-597b96b99b-dzs4m_e6d68b87-9e39-420a-bc88-c3bc641ff82e/cert-manager-webhook/0.log"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.146459 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495546-zv9dh"]
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.149040 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="804b508d-8683-48c2-bd0f-3b254cbde7b2" containerName="extract-utilities"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.149628 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="804b508d-8683-48c2-bd0f-3b254cbde7b2" containerName="extract-utilities"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.149743 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="804b508d-8683-48c2-bd0f-3b254cbde7b2" containerName="registry-server"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.149824 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="804b508d-8683-48c2-bd0f-3b254cbde7b2" containerName="registry-server"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.149919 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="804b508d-8683-48c2-bd0f-3b254cbde7b2" containerName="extract-content"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.150004 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="804b508d-8683-48c2-bd0f-3b254cbde7b2" containerName="extract-content"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.150280 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="804b508d-8683-48c2-bd0f-3b254cbde7b2" containerName="registry-server"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.160287 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495546-zv9dh"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.163771 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\""
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.164960 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-kshml\""
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.166343 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\""
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.205236 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495546-zv9dh"]
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.313719 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hr5x6\" (UniqueName: \"kubernetes.io/projected/e2f9be88-53da-4281-879b-bd29797033ed-kube-api-access-hr5x6\") pod \"auto-csr-approver-29495546-zv9dh\" (UID: \"e2f9be88-53da-4281-879b-bd29797033ed\") " pod="openshift-infra/auto-csr-approver-29495546-zv9dh"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.415507 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-hr5x6\" (UniqueName: \"kubernetes.io/projected/e2f9be88-53da-4281-879b-bd29797033ed-kube-api-access-hr5x6\") pod \"auto-csr-approver-29495546-zv9dh\" (UID: \"e2f9be88-53da-4281-879b-bd29797033ed\") " pod="openshift-infra/auto-csr-approver-29495546-zv9dh"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.445071 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-hr5x6\" (UniqueName: \"kubernetes.io/projected/e2f9be88-53da-4281-879b-bd29797033ed-kube-api-access-hr5x6\") pod \"auto-csr-approver-29495546-zv9dh\" (UID: \"e2f9be88-53da-4281-879b-bd29797033ed\") " pod="openshift-infra/auto-csr-approver-29495546-zv9dh"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.522211 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495546-zv9dh"
Jan 30 00:26:00 crc kubenswrapper[5113]: I0130 00:26:00.744476 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495546-zv9dh"]
Jan 30 00:26:01 crc kubenswrapper[5113]: I0130 00:26:01.253123 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495546-zv9dh" event={"ID":"e2f9be88-53da-4281-879b-bd29797033ed","Type":"ContainerStarted","Data":"552a85cb5b8ff96fb8b055f6b9b2d5e92fc59bd79767e17c37964eeb01729aa6"}
Jan 30 00:26:02 crc kubenswrapper[5113]: I0130 00:26:02.263726 5113 generic.go:358] "Generic (PLEG): container finished" podID="e2f9be88-53da-4281-879b-bd29797033ed" containerID="3d50a0fbd9da0f9255e82bd5de6e38dbe6e7e6c7250503ba1e8dbcdf0411c2e8" exitCode=0
Jan 30 00:26:02 crc kubenswrapper[5113]: I0130 00:26:02.264660 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495546-zv9dh" event={"ID":"e2f9be88-53da-4281-879b-bd29797033ed","Type":"ContainerDied","Data":"3d50a0fbd9da0f9255e82bd5de6e38dbe6e7e6c7250503ba1e8dbcdf0411c2e8"}
Jan 30 00:26:03 crc kubenswrapper[5113]: I0130 00:26:03.531168 5113 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495546-zv9dh" Jan 30 00:26:03 crc kubenswrapper[5113]: I0130 00:26:03.571101 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hr5x6\" (UniqueName: \"kubernetes.io/projected/e2f9be88-53da-4281-879b-bd29797033ed-kube-api-access-hr5x6\") pod \"e2f9be88-53da-4281-879b-bd29797033ed\" (UID: \"e2f9be88-53da-4281-879b-bd29797033ed\") " Jan 30 00:26:03 crc kubenswrapper[5113]: I0130 00:26:03.578844 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2f9be88-53da-4281-879b-bd29797033ed-kube-api-access-hr5x6" (OuterVolumeSpecName: "kube-api-access-hr5x6") pod "e2f9be88-53da-4281-879b-bd29797033ed" (UID: "e2f9be88-53da-4281-879b-bd29797033ed"). InnerVolumeSpecName "kube-api-access-hr5x6". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:26:03 crc kubenswrapper[5113]: I0130 00:26:03.672807 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-hr5x6\" (UniqueName: \"kubernetes.io/projected/e2f9be88-53da-4281-879b-bd29797033ed-kube-api-access-hr5x6\") on node \"crc\" DevicePath \"\"" Jan 30 00:26:04 crc kubenswrapper[5113]: I0130 00:26:04.282673 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495546-zv9dh" Jan 30 00:26:04 crc kubenswrapper[5113]: I0130 00:26:04.282740 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495546-zv9dh" event={"ID":"e2f9be88-53da-4281-879b-bd29797033ed","Type":"ContainerDied","Data":"552a85cb5b8ff96fb8b055f6b9b2d5e92fc59bd79767e17c37964eeb01729aa6"} Jan 30 00:26:04 crc kubenswrapper[5113]: I0130 00:26:04.283363 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="552a85cb5b8ff96fb8b055f6b9b2d5e92fc59bd79767e17c37964eeb01729aa6" Jan 30 00:26:04 crc kubenswrapper[5113]: I0130 00:26:04.601922 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29495540-hbbvk"] Jan 30 00:26:04 crc kubenswrapper[5113]: I0130 00:26:04.607584 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29495540-hbbvk"] Jan 30 00:26:04 crc kubenswrapper[5113]: I0130 00:26:04.781121 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41ff0f59-4052-4862-aa77-23c8f170082b" path="/var/lib/kubelet/pods/41ff0f59-4052-4862-aa77-23c8f170082b/volumes" Jan 30 00:26:10 crc kubenswrapper[5113]: E0130 00:26:10.782421 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" 
pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:26:11 crc kubenswrapper[5113]: I0130 00:26:11.602084 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-9bc85b4bf-twh29_4444bab4-230c-48fb-b893-8d3e93807137/prometheus-operator/0.log" Jan 30 00:26:11 crc kubenswrapper[5113]: I0130 00:26:11.734285 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4_9bc38c6e-f8a9-4383-b930-43e56fe731f7/prometheus-operator-admission-webhook/0.log" Jan 30 00:26:11 crc kubenswrapper[5113]: I0130 00:26:11.753806 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk_0059f278-9b5a-4441-bd68-67ee6c139623/prometheus-operator-admission-webhook/0.log" Jan 30 00:26:11 crc kubenswrapper[5113]: I0130 00:26:11.941816 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-669c9f96b5-4c4gl_8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5/perses-operator/0.log" Jan 30 00:26:11 crc kubenswrapper[5113]: I0130 00:26:11.965020 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-85c68dddb-l2hz8_e4c6bf15-ffcf-4513-a573-c2a328472abe/operator/0.log" Jan 30 00:26:21 crc kubenswrapper[5113]: I0130 00:26:21.195403 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 00:26:21 crc kubenswrapper[5113]: I0130 00:26:21.196092 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 00:26:21 crc kubenswrapper[5113]: I0130 00:26:21.196154 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" Jan 30 00:26:21 crc kubenswrapper[5113]: I0130 00:26:21.196799 5113 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c9c5d9da8823301bda322eade9ad07902d469bbebb6741898c8ea384844e2d83"} pod="openshift-machine-config-operator/machine-config-daemon-gxph5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 00:26:21 crc kubenswrapper[5113]: I0130 00:26:21.196871 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" containerID="cri-o://c9c5d9da8823301bda322eade9ad07902d469bbebb6741898c8ea384844e2d83" gracePeriod=600 Jan 30 00:26:21 crc kubenswrapper[5113]: I0130 00:26:21.415689 5113 generic.go:358] "Generic (PLEG): container finished" podID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerID="c9c5d9da8823301bda322eade9ad07902d469bbebb6741898c8ea384844e2d83" exitCode=0 Jan 30 00:26:21 crc kubenswrapper[5113]: I0130 00:26:21.415742 5113 
kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerDied","Data":"c9c5d9da8823301bda322eade9ad07902d469bbebb6741898c8ea384844e2d83"} Jan 30 00:26:21 crc kubenswrapper[5113]: I0130 00:26:21.416172 5113 scope.go:117] "RemoveContainer" containerID="dbf3b234b28fc071e0fac78c4e03f35f6c4a815279840f68d3fec8f928bdd4c7" Jan 30 00:26:21 crc kubenswrapper[5113]: E0130 00:26:21.776090 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:26:22 crc kubenswrapper[5113]: I0130 00:26:22.425583 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerStarted","Data":"519842b7b2c623a3f39dbde76a1ad7ac7532c3d5442d27a649b2519cd9045b25"} Jan 30 00:26:26 crc kubenswrapper[5113]: I0130 00:26:26.909134 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7_b060f104-6f51-4e29-987f-78aec9eb9a4f/util/0.log" Jan 30 00:26:27 crc kubenswrapper[5113]: I0130 00:26:27.120919 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7_b060f104-6f51-4e29-987f-78aec9eb9a4f/util/0.log" Jan 30 00:26:27 crc kubenswrapper[5113]: I0130 00:26:27.122181 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7_b060f104-6f51-4e29-987f-78aec9eb9a4f/pull/0.log" Jan 30 00:26:27 crc kubenswrapper[5113]: I0130 00:26:27.134383 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7_b060f104-6f51-4e29-987f-78aec9eb9a4f/pull/0.log" Jan 30 00:26:27 crc kubenswrapper[5113]: I0130 00:26:27.259098 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7_b060f104-6f51-4e29-987f-78aec9eb9a4f/util/0.log" Jan 30 00:26:27 crc kubenswrapper[5113]: I0130 00:26:27.305355 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7_b060f104-6f51-4e29-987f-78aec9eb9a4f/extract/0.log" Jan 30 00:26:27 crc kubenswrapper[5113]: I0130 00:26:27.335235 5113 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8fpfjj7_b060f104-6f51-4e29-987f-78aec9eb9a4f/pull/0.log" Jan 30 00:26:27 crc kubenswrapper[5113]: I0130 00:26:27.453777 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46_37e551ce-ff20-486b-986a-429cb060e341/util/0.log" Jan 30 00:26:27 crc kubenswrapper[5113]: I0130 00:26:27.682671 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46_37e551ce-ff20-486b-986a-429cb060e341/util/0.log" Jan 30 00:26:27 crc kubenswrapper[5113]: I0130 00:26:27.853113 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46_37e551ce-ff20-486b-986a-429cb060e341/util/0.log" Jan 30 00:26:28 crc kubenswrapper[5113]: I0130 00:26:28.051717 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm_dc777063-84e1-4b8d-93b5-8ac8bbfee31d/util/0.log" Jan 30 00:26:28 crc kubenswrapper[5113]: I0130 00:26:28.253570 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm_dc777063-84e1-4b8d-93b5-8ac8bbfee31d/pull/0.log" Jan 30 00:26:28 crc kubenswrapper[5113]: I0130 00:26:28.255217 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm_dc777063-84e1-4b8d-93b5-8ac8bbfee31d/util/0.log" Jan 30 00:26:28 crc kubenswrapper[5113]: I0130 00:26:28.292537 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm_dc777063-84e1-4b8d-93b5-8ac8bbfee31d/pull/0.log" Jan 30 00:26:28 crc kubenswrapper[5113]: I0130 00:26:28.420652 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm_dc777063-84e1-4b8d-93b5-8ac8bbfee31d/util/0.log" Jan 30 00:26:28 crc kubenswrapper[5113]: I0130 00:26:28.455555 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm_dc777063-84e1-4b8d-93b5-8ac8bbfee31d/pull/0.log" Jan 30 00:26:28 crc kubenswrapper[5113]: I0130 00:26:28.503053 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_925ad1f05bf386dc21bdfe2f8249c1fbfd04a404dec7a7fb6362d758e5jxmwm_dc777063-84e1-4b8d-93b5-8ac8bbfee31d/extract/0.log" Jan 30 00:26:28 crc kubenswrapper[5113]: I0130 00:26:28.631794 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4_f2c87f6f-31be-47ab-bcf7-4d6f1fd34415/util/0.log" Jan 30 00:26:28 crc kubenswrapper[5113]: I0130 00:26:28.839999 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4_f2c87f6f-31be-47ab-bcf7-4d6f1fd34415/pull/0.log" Jan 30 00:26:28 crc kubenswrapper[5113]: I0130 00:26:28.850163 5113 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4_f2c87f6f-31be-47ab-bcf7-4d6f1fd34415/util/0.log" Jan 30 00:26:28 crc kubenswrapper[5113]: I0130 00:26:28.850598 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4_f2c87f6f-31be-47ab-bcf7-4d6f1fd34415/pull/0.log" Jan 30 00:26:29 crc kubenswrapper[5113]: I0130 00:26:29.049206 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4_f2c87f6f-31be-47ab-bcf7-4d6f1fd34415/pull/0.log" Jan 30 00:26:29 crc kubenswrapper[5113]: I0130 00:26:29.053122 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4_f2c87f6f-31be-47ab-bcf7-4d6f1fd34415/extract/0.log" Jan 30 00:26:29 crc kubenswrapper[5113]: I0130 00:26:29.064940 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08rdqr4_f2c87f6f-31be-47ab-bcf7-4d6f1fd34415/util/0.log" Jan 30 00:26:29 crc kubenswrapper[5113]: I0130 00:26:29.253613 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-22l6b_ca96fb3e-7dc2-4095-a77a-7125b34d5804/extract-utilities/0.log" Jan 30 00:26:29 crc kubenswrapper[5113]: I0130 00:26:29.451502 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-22l6b_ca96fb3e-7dc2-4095-a77a-7125b34d5804/extract-utilities/0.log" Jan 30 00:26:29 crc kubenswrapper[5113]: I0130 00:26:29.464910 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-22l6b_ca96fb3e-7dc2-4095-a77a-7125b34d5804/extract-content/0.log" Jan 30 00:26:29 crc kubenswrapper[5113]: I0130 00:26:29.480467 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-22l6b_ca96fb3e-7dc2-4095-a77a-7125b34d5804/extract-content/0.log" Jan 30 00:26:29 crc kubenswrapper[5113]: I0130 00:26:29.626618 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-22l6b_ca96fb3e-7dc2-4095-a77a-7125b34d5804/extract-utilities/0.log" Jan 30 00:26:29 crc kubenswrapper[5113]: I0130 00:26:29.679509 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-22l6b_ca96fb3e-7dc2-4095-a77a-7125b34d5804/extract-content/0.log" Jan 30 00:26:29 crc kubenswrapper[5113]: I0130 00:26:29.741255 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-22l6b_ca96fb3e-7dc2-4095-a77a-7125b34d5804/registry-server/0.log" Jan 30 00:26:29 crc kubenswrapper[5113]: I0130 00:26:29.832314 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xwlmk_3f73f040-c591-4342-a2b3-cfa77b826069/extract-utilities/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.025495 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xwlmk_3f73f040-c591-4342-a2b3-cfa77b826069/extract-utilities/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.032239 5113 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-xwlmk_3f73f040-c591-4342-a2b3-cfa77b826069/extract-content/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.032260 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xwlmk_3f73f040-c591-4342-a2b3-cfa77b826069/extract-content/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.233693 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xwlmk_3f73f040-c591-4342-a2b3-cfa77b826069/extract-content/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.239418 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xwlmk_3f73f040-c591-4342-a2b3-cfa77b826069/extract-utilities/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.335704 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-547dbd544d-6nt5s_9da74617-052a-44f4-a53c-b8e8ca99f9da/marketplace-operator/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.502886 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zvvkv_95aea721-4924-4c1a-9a22-8e078dbd1a05/extract-utilities/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.581661 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-xwlmk_3f73f040-c591-4342-a2b3-cfa77b826069/registry-server/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.725193 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zvvkv_95aea721-4924-4c1a-9a22-8e078dbd1a05/extract-content/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.735584 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zvvkv_95aea721-4924-4c1a-9a22-8e078dbd1a05/extract-utilities/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.744666 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zvvkv_95aea721-4924-4c1a-9a22-8e078dbd1a05/extract-content/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.924598 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zvvkv_95aea721-4924-4c1a-9a22-8e078dbd1a05/extract-utilities/0.log" Jan 30 00:26:30 crc kubenswrapper[5113]: I0130 00:26:30.929692 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zvvkv_95aea721-4924-4c1a-9a22-8e078dbd1a05/extract-content/0.log" Jan 30 00:26:31 crc kubenswrapper[5113]: I0130 00:26:31.148948 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zvvkv_95aea721-4924-4c1a-9a22-8e078dbd1a05/registry-server/0.log" Jan 30 00:26:34 crc kubenswrapper[5113]: E0130 00:26:34.776997 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: 
Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:26:42 crc kubenswrapper[5113]: I0130 00:26:42.182103 5113 scope.go:117] "RemoveContainer" containerID="0769b7379741b2af5c39c3380c9e5f271428b6267e7671fc02902fb1fc451d88" Jan 30 00:26:43 crc kubenswrapper[5113]: I0130 00:26:43.149539 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-85ccdc6654-7r2m4_9bc38c6e-f8a9-4383-b930-43e56fe731f7/prometheus-operator-admission-webhook/0.log" Jan 30 00:26:43 crc kubenswrapper[5113]: I0130 00:26:43.169936 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-85ccdc6654-xcpqk_0059f278-9b5a-4441-bd68-67ee6c139623/prometheus-operator-admission-webhook/0.log" Jan 30 00:26:43 crc kubenswrapper[5113]: I0130 00:26:43.181401 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-9bc85b4bf-twh29_4444bab4-230c-48fb-b893-8d3e93807137/prometheus-operator/0.log" Jan 30 00:26:43 crc kubenswrapper[5113]: I0130 00:26:43.339678 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-85c68dddb-l2hz8_e4c6bf15-ffcf-4513-a573-c2a328472abe/operator/0.log" Jan 30 00:26:43 crc kubenswrapper[5113]: I0130 00:26:43.355063 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-669c9f96b5-4c4gl_8c07fd1a-e7bd-414d-a32e-68fda0f0e4a5/perses-operator/0.log" Jan 30 00:26:45 crc kubenswrapper[5113]: E0130 00:26:45.774772 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:26:59 crc kubenswrapper[5113]: E0130 00:26:59.776770 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source 
docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:27:11 crc kubenswrapper[5113]: E0130 00:27:11.776448 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:27:23 crc kubenswrapper[5113]: E0130 00:27:23.816839 5113 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" image="registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb" Jan 30 00:27:23 crc kubenswrapper[5113]: E0130 00:27:23.817674 5113 kuberuntime_manager.go:1358] "Unhandled Error" err="init container &Container{Name:pull,Image:registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb,Command:[/util/cpb /bundle],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:bundle,ReadOnly:false,MountPath:/bundle,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:util,ReadOnly:false,MountPath:/util,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pxmz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000240000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46_openshift-marketplace(37e551ce-ff20-486b-986a-429cb060e341): ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \"https://registry.connect.redhat.com/v2/\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving" logger="UnhandledError" Jan 30 00:27:23 crc kubenswrapper[5113]: E0130 00:27:23.818983 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ErrImagePull: \"unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:27:28 crc kubenswrapper[5113]: I0130 00:27:28.970967 5113 generic.go:358] "Generic (PLEG): container finished" podID="e86f32f3-00ea-4ada-b7ed-be7376c43705" containerID="de2f9a0576aeadb6423b2954d3732784c671052e555256827a9230e40b127106" exitCode=0 Jan 30 00:27:28 crc kubenswrapper[5113]: I0130 00:27:28.971108 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-bs6s9/must-gather-r5v74" event={"ID":"e86f32f3-00ea-4ada-b7ed-be7376c43705","Type":"ContainerDied","Data":"de2f9a0576aeadb6423b2954d3732784c671052e555256827a9230e40b127106"} Jan 30 00:27:28 crc kubenswrapper[5113]: I0130 00:27:28.975059 5113 
scope.go:117] "RemoveContainer" containerID="de2f9a0576aeadb6423b2954d3732784c671052e555256827a9230e40b127106" Jan 30 00:27:29 crc kubenswrapper[5113]: I0130 00:27:29.416477 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bs6s9_must-gather-r5v74_e86f32f3-00ea-4ada-b7ed-be7376c43705/gather/0.log" Jan 30 00:27:34 crc kubenswrapper[5113]: E0130 00:27:34.776675 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:27:35 crc kubenswrapper[5113]: I0130 00:27:35.674587 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-bs6s9/must-gather-r5v74"] Jan 30 00:27:35 crc kubenswrapper[5113]: I0130 00:27:35.675100 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-must-gather-bs6s9/must-gather-r5v74" podUID="e86f32f3-00ea-4ada-b7ed-be7376c43705" containerName="copy" containerID="cri-o://eccaec654d26e6fd0ddba0e3efd7308740ab2e4b375eca624fb2344ac7cac63f" gracePeriod=2 Jan 30 00:27:35 crc kubenswrapper[5113]: I0130 00:27:35.677575 5113 status_manager.go:895] "Failed to get status for pod" podUID="e86f32f3-00ea-4ada-b7ed-be7376c43705" pod="openshift-must-gather-bs6s9/must-gather-r5v74" err="pods \"must-gather-r5v74\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-must-gather-bs6s9\": no relationship found between node 'crc' and this object" Jan 30 00:27:35 crc kubenswrapper[5113]: I0130 00:27:35.680329 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-bs6s9/must-gather-r5v74"] Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.038300 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bs6s9_must-gather-r5v74_e86f32f3-00ea-4ada-b7ed-be7376c43705/copy/0.log" Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.039189 5113 generic.go:358] "Generic (PLEG): container finished" podID="e86f32f3-00ea-4ada-b7ed-be7376c43705" containerID="eccaec654d26e6fd0ddba0e3efd7308740ab2e4b375eca624fb2344ac7cac63f" exitCode=143 Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.088647 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bs6s9_must-gather-r5v74_e86f32f3-00ea-4ada-b7ed-be7376c43705/copy/0.log" Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.089040 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bs6s9/must-gather-r5v74" Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.091110 5113 status_manager.go:895] "Failed to get status for pod" podUID="e86f32f3-00ea-4ada-b7ed-be7376c43705" pod="openshift-must-gather-bs6s9/must-gather-r5v74" err="pods \"must-gather-r5v74\" is forbidden: User \"system:node:crc\" cannot get resource \"pods\" in API group \"\" in the namespace \"openshift-must-gather-bs6s9\": no relationship found between node 'crc' and this object" Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.224908 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5fvx\" (UniqueName: \"kubernetes.io/projected/e86f32f3-00ea-4ada-b7ed-be7376c43705-kube-api-access-v5fvx\") pod \"e86f32f3-00ea-4ada-b7ed-be7376c43705\" (UID: \"e86f32f3-00ea-4ada-b7ed-be7376c43705\") " Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.225081 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e86f32f3-00ea-4ada-b7ed-be7376c43705-must-gather-output\") pod \"e86f32f3-00ea-4ada-b7ed-be7376c43705\" (UID: \"e86f32f3-00ea-4ada-b7ed-be7376c43705\") " Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.244070 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e86f32f3-00ea-4ada-b7ed-be7376c43705-kube-api-access-v5fvx" (OuterVolumeSpecName: "kube-api-access-v5fvx") pod "e86f32f3-00ea-4ada-b7ed-be7376c43705" (UID: "e86f32f3-00ea-4ada-b7ed-be7376c43705"). InnerVolumeSpecName "kube-api-access-v5fvx". PluginName "kubernetes.io/projected", VolumeGIDValue "" Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.275382 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e86f32f3-00ea-4ada-b7ed-be7376c43705-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "e86f32f3-00ea-4ada-b7ed-be7376c43705" (UID: "e86f32f3-00ea-4ada-b7ed-be7376c43705"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGIDValue "" Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.327232 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-v5fvx\" (UniqueName: \"kubernetes.io/projected/e86f32f3-00ea-4ada-b7ed-be7376c43705-kube-api-access-v5fvx\") on node \"crc\" DevicePath \"\"" Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.327299 5113 reconciler_common.go:299] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/e86f32f3-00ea-4ada-b7ed-be7376c43705-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 30 00:27:36 crc kubenswrapper[5113]: I0130 00:27:36.786300 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e86f32f3-00ea-4ada-b7ed-be7376c43705" path="/var/lib/kubelet/pods/e86f32f3-00ea-4ada-b7ed-be7376c43705/volumes" Jan 30 00:27:37 crc kubenswrapper[5113]: I0130 00:27:37.051367 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-bs6s9_must-gather-r5v74_e86f32f3-00ea-4ada-b7ed-be7376c43705/copy/0.log" Jan 30 00:27:37 crc kubenswrapper[5113]: I0130 00:27:37.052422 5113 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-bs6s9/must-gather-r5v74" Jan 30 00:27:37 crc kubenswrapper[5113]: I0130 00:27:37.052437 5113 scope.go:117] "RemoveContainer" containerID="eccaec654d26e6fd0ddba0e3efd7308740ab2e4b375eca624fb2344ac7cac63f" Jan 30 00:27:37 crc kubenswrapper[5113]: I0130 00:27:37.087359 5113 scope.go:117] "RemoveContainer" containerID="de2f9a0576aeadb6423b2954d3732784c671052e555256827a9230e40b127106" Jan 30 00:27:45 crc kubenswrapper[5113]: E0130 00:27:45.779281 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:27:59 crc kubenswrapper[5113]: E0130 00:27:59.777069 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.143078 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495548-9dw42"] Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.143819 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="e86f32f3-00ea-4ada-b7ed-be7376c43705" containerName="gather" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.143854 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="e86f32f3-00ea-4ada-b7ed-be7376c43705" containerName="gather" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.143879 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="e86f32f3-00ea-4ada-b7ed-be7376c43705" containerName="copy" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.143933 5113 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="e86f32f3-00ea-4ada-b7ed-be7376c43705" containerName="copy" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.144021 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="e2f9be88-53da-4281-879b-bd29797033ed" containerName="oc" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.144046 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2f9be88-53da-4281-879b-bd29797033ed" containerName="oc" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.144231 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="e86f32f3-00ea-4ada-b7ed-be7376c43705" containerName="copy" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.144262 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="e2f9be88-53da-4281-879b-bd29797033ed" containerName="oc" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.144272 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="e86f32f3-00ea-4ada-b7ed-be7376c43705" containerName="gather" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.158835 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495548-9dw42" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.159545 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495548-9dw42"] Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.163591 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\"" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.163895 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-kshml\"" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.166227 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\"" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.246514 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2j5m\" (UniqueName: \"kubernetes.io/projected/b2630c8c-3b4a-4478-8130-a9ed9ff4959f-kube-api-access-b2j5m\") pod \"auto-csr-approver-29495548-9dw42\" (UID: \"b2630c8c-3b4a-4478-8130-a9ed9ff4959f\") " pod="openshift-infra/auto-csr-approver-29495548-9dw42" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.348379 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-b2j5m\" (UniqueName: \"kubernetes.io/projected/b2630c8c-3b4a-4478-8130-a9ed9ff4959f-kube-api-access-b2j5m\") pod \"auto-csr-approver-29495548-9dw42\" (UID: \"b2630c8c-3b4a-4478-8130-a9ed9ff4959f\") " pod="openshift-infra/auto-csr-approver-29495548-9dw42" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.372997 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2j5m\" (UniqueName: \"kubernetes.io/projected/b2630c8c-3b4a-4478-8130-a9ed9ff4959f-kube-api-access-b2j5m\") pod \"auto-csr-approver-29495548-9dw42\" (UID: \"b2630c8c-3b4a-4478-8130-a9ed9ff4959f\") " pod="openshift-infra/auto-csr-approver-29495548-9dw42" Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.501651 5113 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-infra/auto-csr-approver-29495548-9dw42"
Jan 30 00:28:00 crc kubenswrapper[5113]: I0130 00:28:00.957272 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495548-9dw42"]
Jan 30 00:28:01 crc kubenswrapper[5113]: I0130 00:28:01.253251 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495548-9dw42" event={"ID":"b2630c8c-3b4a-4478-8130-a9ed9ff4959f","Type":"ContainerStarted","Data":"6de66e6ce0b5ed321a6ebd33fe33b6815f1563bddf7b4139215eed09ab493f64"}
Jan 30 00:28:03 crc kubenswrapper[5113]: I0130 00:28:03.274446 5113 generic.go:358] "Generic (PLEG): container finished" podID="b2630c8c-3b4a-4478-8130-a9ed9ff4959f" containerID="5c98fa50bd18ef10393fb35d4f32e49e8a674839f117e7944a2e4400e869e8f9" exitCode=0
Jan 30 00:28:03 crc kubenswrapper[5113]: I0130 00:28:03.274579 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495548-9dw42" event={"ID":"b2630c8c-3b4a-4478-8130-a9ed9ff4959f","Type":"ContainerDied","Data":"5c98fa50bd18ef10393fb35d4f32e49e8a674839f117e7944a2e4400e869e8f9"}
Jan 30 00:28:04 crc kubenswrapper[5113]: I0130 00:28:04.574310 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495548-9dw42"
Jan 30 00:28:04 crc kubenswrapper[5113]: I0130 00:28:04.617361 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2j5m\" (UniqueName: \"kubernetes.io/projected/b2630c8c-3b4a-4478-8130-a9ed9ff4959f-kube-api-access-b2j5m\") pod \"b2630c8c-3b4a-4478-8130-a9ed9ff4959f\" (UID: \"b2630c8c-3b4a-4478-8130-a9ed9ff4959f\") "
Jan 30 00:28:04 crc kubenswrapper[5113]: I0130 00:28:04.625502 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2630c8c-3b4a-4478-8130-a9ed9ff4959f-kube-api-access-b2j5m" (OuterVolumeSpecName: "kube-api-access-b2j5m") pod "b2630c8c-3b4a-4478-8130-a9ed9ff4959f" (UID: "b2630c8c-3b4a-4478-8130-a9ed9ff4959f"). InnerVolumeSpecName "kube-api-access-b2j5m". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:28:04 crc kubenswrapper[5113]: I0130 00:28:04.719245 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-b2j5m\" (UniqueName: \"kubernetes.io/projected/b2630c8c-3b4a-4478-8130-a9ed9ff4959f-kube-api-access-b2j5m\") on node \"crc\" DevicePath \"\""
Jan 30 00:28:05 crc kubenswrapper[5113]: I0130 00:28:05.299104 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495548-9dw42" event={"ID":"b2630c8c-3b4a-4478-8130-a9ed9ff4959f","Type":"ContainerDied","Data":"6de66e6ce0b5ed321a6ebd33fe33b6815f1563bddf7b4139215eed09ab493f64"}
Jan 30 00:28:05 crc kubenswrapper[5113]: I0130 00:28:05.299597 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6de66e6ce0b5ed321a6ebd33fe33b6815f1563bddf7b4139215eed09ab493f64"
Jan 30 00:28:05 crc kubenswrapper[5113]: I0130 00:28:05.299936 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495548-9dw42"
Jan 30 00:28:05 crc kubenswrapper[5113]: I0130 00:28:05.661481 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29495542-n5nbw"]
Jan 30 00:28:05 crc kubenswrapper[5113]: I0130 00:28:05.665344 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29495542-n5nbw"]
Jan 30 00:28:06 crc kubenswrapper[5113]: I0130 00:28:06.788069 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42c94cc3-978a-4e0c-824f-07666143914d" path="/var/lib/kubelet/pods/42c94cc3-978a-4e0c-824f-07666143914d/volumes"
Jan 30 00:28:13 crc kubenswrapper[5113]: E0130 00:28:13.777122 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:28:21 crc kubenswrapper[5113]: I0130 00:28:21.195465 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 00:28:21 crc kubenswrapper[5113]: I0130 00:28:21.196626 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 00:28:25 crc kubenswrapper[5113]: E0130 00:28:25.776863 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:28:37 crc kubenswrapper[5113]: E0130 00:28:37.776639 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:28:42 crc kubenswrapper[5113]: I0130 00:28:42.335084 5113 scope.go:117] "RemoveContainer" containerID="f0c2111164a2fec15cfb261ec072172eb4e7ddc17fc443dfb57b78c86a591b66"
Jan 30 00:28:49 crc kubenswrapper[5113]: E0130 00:28:49.775723 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:28:51 crc kubenswrapper[5113]: I0130 00:28:51.195216 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 00:28:51 crc kubenswrapper[5113]: I0130 00:28:51.195853 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 00:29:04 crc kubenswrapper[5113]: E0130 00:29:04.775490 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:29:16 crc kubenswrapper[5113]: E0130 00:29:16.777829 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:29:21 crc kubenswrapper[5113]: I0130 00:29:21.196040 5113 patch_prober.go:28] interesting pod/machine-config-daemon-gxph5 container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 00:29:21 crc kubenswrapper[5113]: I0130 00:29:21.196742 5113 prober.go:120] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 00:29:21 crc kubenswrapper[5113]: I0130 00:29:21.196838 5113 kubelet.go:2658] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-gxph5"
Jan 30 00:29:21 crc kubenswrapper[5113]: I0130 00:29:21.197811 5113 kuberuntime_manager.go:1107] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"519842b7b2c623a3f39dbde76a1ad7ac7532c3d5442d27a649b2519cd9045b25"} pod="openshift-machine-config-operator/machine-config-daemon-gxph5" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 30 00:29:21 crc kubenswrapper[5113]: I0130 00:29:21.198002 5113 kuberuntime_container.go:858] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" podUID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerName="machine-config-daemon" containerID="cri-o://519842b7b2c623a3f39dbde76a1ad7ac7532c3d5442d27a649b2519cd9045b25" gracePeriod=600
Jan 30 00:29:22 crc kubenswrapper[5113]: I0130 00:29:22.012253 5113 generic.go:358] "Generic (PLEG): container finished" podID="dccb6bc1-d2db-4bf2-a0db-1c84219d0499" containerID="519842b7b2c623a3f39dbde76a1ad7ac7532c3d5442d27a649b2519cd9045b25" exitCode=0
Jan 30 00:29:22 crc kubenswrapper[5113]: I0130 00:29:22.012361 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerDied","Data":"519842b7b2c623a3f39dbde76a1ad7ac7532c3d5442d27a649b2519cd9045b25"}
Jan 30 00:29:22 crc kubenswrapper[5113]: I0130 00:29:22.012968 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-gxph5" event={"ID":"dccb6bc1-d2db-4bf2-a0db-1c84219d0499","Type":"ContainerStarted","Data":"c842ae539050c0f27a7b281f24fe6aaa3575b4ca31d4d1d170d6d897b34607c1"}
Jan 30 00:29:22 crc kubenswrapper[5113]: I0130 00:29:22.013030 5113 scope.go:117] "RemoveContainer" containerID="c9c5d9da8823301bda322eade9ad07902d469bbebb6741898c8ea384844e2d83"
Jan 30 00:29:28 crc kubenswrapper[5113]: E0130 00:29:28.776764 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:29:41 crc kubenswrapper[5113]: I0130 00:29:41.222352 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log"
Jan 30 00:29:41 crc kubenswrapper[5113]: I0130 00:29:41.223640 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-authentication_oauth-openshift-576bd486d8-68jgg_3bde0ed2-71a3-4281-b21d-af61bdb778ef/oauth-openshift/1.log"
Jan 30 00:29:41 crc kubenswrapper[5113]: I0130 00:29:41.265918 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mbd62_8ad95d7b-7c01-4672-8614-0cc8e52c0d79/kube-multus/0.log"
Jan 30 00:29:41 crc kubenswrapper[5113]: I0130 00:29:41.267337 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-mbd62_8ad95d7b-7c01-4672-8614-0cc8e52c0d79/kube-multus/0.log"
Jan 30 00:29:41 crc kubenswrapper[5113]: I0130 00:29:41.270661 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log"
Jan 30 00:29:41 crc kubenswrapper[5113]: I0130 00:29:41.271689 5113 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_9f0bc7fcb0822a2c13eb2d22cd8c0641/kube-controller-manager/0.log"
Jan 30 00:29:42 crc kubenswrapper[5113]: E0130 00:29:42.776160 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:29:53 crc kubenswrapper[5113]: E0130 00:29:53.776288 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.144537 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"]
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.146948 5113 cpu_manager.go:401] "RemoveStaleState: containerMap: removing container" podUID="b2630c8c-3b4a-4478-8130-a9ed9ff4959f" containerName="oc"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.147007 5113 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2630c8c-3b4a-4478-8130-a9ed9ff4959f" containerName="oc"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.147216 5113 memory_manager.go:356] "RemoveStaleState removing state" podUID="b2630c8c-3b4a-4478-8130-a9ed9ff4959f" containerName="oc"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.158624 5113 kubelet.go:2537] "SyncLoop ADD" source="api" pods=["openshift-infra/auto-csr-approver-29495550-446x2"]
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.199073 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495550-446x2"]
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.199303 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495550-446x2"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.200438 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.202875 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-infra\"/\"csr-approver-sa-dockercfg-kshml\""
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.202886 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"kube-root-ca.crt\""
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.203584 5113 reflector.go:430] "Caches populated" type="*v1.Secret" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-dockercfg-vfqp6\""
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.203643 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-operator-lifecycle-manager\"/\"collect-profiles-config\""
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.203594 5113 reflector.go:430] "Caches populated" type="*v1.ConfigMap" reflector="object-\"openshift-infra\"/\"openshift-service-ca.crt\""
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.209090 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"]
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.220431 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fd4k\" (UniqueName: \"kubernetes.io/projected/3e10bada-94e4-40c9-bacb-39cea7378d43-kube-api-access-8fd4k\") pod \"auto-csr-approver-29495550-446x2\" (UID: \"3e10bada-94e4-40c9-bacb-39cea7378d43\") " pod="openshift-infra/auto-csr-approver-29495550-446x2"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.224805 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df851b9e-7445-4bc1-9dbf-257644132bbc-config-volume\") pod \"collect-profiles-29495550-w75gs\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.225129 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tn25\" (UniqueName: \"kubernetes.io/projected/df851b9e-7445-4bc1-9dbf-257644132bbc-kube-api-access-7tn25\") pod \"collect-profiles-29495550-w75gs\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.226399 5113 reconciler_common.go:251] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df851b9e-7445-4bc1-9dbf-257644132bbc-secret-volume\") pod \"collect-profiles-29495550-w75gs\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.327766 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-7tn25\" (UniqueName: \"kubernetes.io/projected/df851b9e-7445-4bc1-9dbf-257644132bbc-kube-api-access-7tn25\") pod \"collect-profiles-29495550-w75gs\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.327837 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df851b9e-7445-4bc1-9dbf-257644132bbc-secret-volume\") pod \"collect-profiles-29495550-w75gs\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.327889 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"kube-api-access-8fd4k\" (UniqueName: \"kubernetes.io/projected/3e10bada-94e4-40c9-bacb-39cea7378d43-kube-api-access-8fd4k\") pod \"auto-csr-approver-29495550-446x2\" (UID: \"3e10bada-94e4-40c9-bacb-39cea7378d43\") " pod="openshift-infra/auto-csr-approver-29495550-446x2"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.327935 5113 reconciler_common.go:224] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df851b9e-7445-4bc1-9dbf-257644132bbc-config-volume\") pod \"collect-profiles-29495550-w75gs\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.330796 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df851b9e-7445-4bc1-9dbf-257644132bbc-config-volume\") pod \"collect-profiles-29495550-w75gs\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.336711 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df851b9e-7445-4bc1-9dbf-257644132bbc-secret-volume\") pod \"collect-profiles-29495550-w75gs\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.352133 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fd4k\" (UniqueName: \"kubernetes.io/projected/3e10bada-94e4-40c9-bacb-39cea7378d43-kube-api-access-8fd4k\") pod \"auto-csr-approver-29495550-446x2\" (UID: \"3e10bada-94e4-40c9-bacb-39cea7378d43\") " pod="openshift-infra/auto-csr-approver-29495550-446x2"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.352330 5113 operation_generator.go:615] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tn25\" (UniqueName: \"kubernetes.io/projected/df851b9e-7445-4bc1-9dbf-257644132bbc-kube-api-access-7tn25\") pod \"collect-profiles-29495550-w75gs\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.523094 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495550-446x2"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.534783 5113 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.760781 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-infra/auto-csr-approver-29495550-446x2"]
Jan 30 00:30:00 crc kubenswrapper[5113]: W0130 00:30:00.773222 5113 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e10bada_94e4_40c9_bacb_39cea7378d43.slice/crio-f94e53a6a9e328335312cc06a7487908bb2b6f84d198f3298c5bf4c8b3611e23 WatchSource:0}: Error finding container f94e53a6a9e328335312cc06a7487908bb2b6f84d198f3298c5bf4c8b3611e23: Status 404 returned error can't find the container with id f94e53a6a9e328335312cc06a7487908bb2b6f84d198f3298c5bf4c8b3611e23
Jan 30 00:30:00 crc kubenswrapper[5113]: I0130 00:30:00.794611 5113 kubelet.go:2544] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"]
Jan 30 00:30:01 crc kubenswrapper[5113]: I0130 00:30:01.359084 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495550-446x2" event={"ID":"3e10bada-94e4-40c9-bacb-39cea7378d43","Type":"ContainerStarted","Data":"f94e53a6a9e328335312cc06a7487908bb2b6f84d198f3298c5bf4c8b3611e23"}
Jan 30 00:30:01 crc kubenswrapper[5113]: I0130 00:30:01.361190 5113 generic.go:358] "Generic (PLEG): container finished" podID="df851b9e-7445-4bc1-9dbf-257644132bbc" containerID="29bc94bdf24d08c93b36e108700d988c86f243f978a33e037d9673cde3d23e4d" exitCode=0
Jan 30 00:30:01 crc kubenswrapper[5113]: I0130 00:30:01.361295 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs" event={"ID":"df851b9e-7445-4bc1-9dbf-257644132bbc","Type":"ContainerDied","Data":"29bc94bdf24d08c93b36e108700d988c86f243f978a33e037d9673cde3d23e4d"}
Jan 30 00:30:01 crc kubenswrapper[5113]: I0130 00:30:01.361315 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs" event={"ID":"df851b9e-7445-4bc1-9dbf-257644132bbc","Type":"ContainerStarted","Data":"4a46d575b4a5affb4184530ecdedbb1adafabd6f27c7f1959a8eb536f532bd5a"}
Jan 30 00:30:02 crc kubenswrapper[5113]: I0130 00:30:02.632717 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:02 crc kubenswrapper[5113]: I0130 00:30:02.689966 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df851b9e-7445-4bc1-9dbf-257644132bbc-secret-volume\") pod \"df851b9e-7445-4bc1-9dbf-257644132bbc\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") "
Jan 30 00:30:02 crc kubenswrapper[5113]: I0130 00:30:02.690055 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df851b9e-7445-4bc1-9dbf-257644132bbc-config-volume\") pod \"df851b9e-7445-4bc1-9dbf-257644132bbc\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") "
Jan 30 00:30:02 crc kubenswrapper[5113]: I0130 00:30:02.690143 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tn25\" (UniqueName: \"kubernetes.io/projected/df851b9e-7445-4bc1-9dbf-257644132bbc-kube-api-access-7tn25\") pod \"df851b9e-7445-4bc1-9dbf-257644132bbc\" (UID: \"df851b9e-7445-4bc1-9dbf-257644132bbc\") "
Jan 30 00:30:02 crc kubenswrapper[5113]: I0130 00:30:02.691609 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df851b9e-7445-4bc1-9dbf-257644132bbc-config-volume" (OuterVolumeSpecName: "config-volume") pod "df851b9e-7445-4bc1-9dbf-257644132bbc" (UID: "df851b9e-7445-4bc1-9dbf-257644132bbc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGIDValue ""
Jan 30 00:30:02 crc kubenswrapper[5113]: I0130 00:30:02.700051 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df851b9e-7445-4bc1-9dbf-257644132bbc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "df851b9e-7445-4bc1-9dbf-257644132bbc" (UID: "df851b9e-7445-4bc1-9dbf-257644132bbc"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGIDValue ""
Jan 30 00:30:02 crc kubenswrapper[5113]: I0130 00:30:02.700100 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df851b9e-7445-4bc1-9dbf-257644132bbc-kube-api-access-7tn25" (OuterVolumeSpecName: "kube-api-access-7tn25") pod "df851b9e-7445-4bc1-9dbf-257644132bbc" (UID: "df851b9e-7445-4bc1-9dbf-257644132bbc"). InnerVolumeSpecName "kube-api-access-7tn25". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:30:02 crc kubenswrapper[5113]: I0130 00:30:02.791978 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-7tn25\" (UniqueName: \"kubernetes.io/projected/df851b9e-7445-4bc1-9dbf-257644132bbc-kube-api-access-7tn25\") on node \"crc\" DevicePath \"\""
Jan 30 00:30:02 crc kubenswrapper[5113]: I0130 00:30:02.792016 5113 reconciler_common.go:299] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/df851b9e-7445-4bc1-9dbf-257644132bbc-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 30 00:30:02 crc kubenswrapper[5113]: I0130 00:30:02.792028 5113 reconciler_common.go:299] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/df851b9e-7445-4bc1-9dbf-257644132bbc-config-volume\") on node \"crc\" DevicePath \"\""
Jan 30 00:30:03 crc kubenswrapper[5113]: I0130 00:30:03.379095 5113 generic.go:358] "Generic (PLEG): container finished" podID="3e10bada-94e4-40c9-bacb-39cea7378d43" containerID="ca210ad1ecf1e9b52578b86b8687d13714ae59f4ba8eaf0f41b49735df7c4ffb" exitCode=0
Jan 30 00:30:03 crc kubenswrapper[5113]: I0130 00:30:03.380262 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495550-446x2" event={"ID":"3e10bada-94e4-40c9-bacb-39cea7378d43","Type":"ContainerDied","Data":"ca210ad1ecf1e9b52578b86b8687d13714ae59f4ba8eaf0f41b49735df7c4ffb"}
Jan 30 00:30:03 crc kubenswrapper[5113]: I0130 00:30:03.382875 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs" event={"ID":"df851b9e-7445-4bc1-9dbf-257644132bbc","Type":"ContainerDied","Data":"4a46d575b4a5affb4184530ecdedbb1adafabd6f27c7f1959a8eb536f532bd5a"}
Jan 30 00:30:03 crc kubenswrapper[5113]: I0130 00:30:03.382944 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a46d575b4a5affb4184530ecdedbb1adafabd6f27c7f1959a8eb536f532bd5a"
Jan 30 00:30:03 crc kubenswrapper[5113]: I0130 00:30:03.383086 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29495550-w75gs"
Jan 30 00:30:04 crc kubenswrapper[5113]: I0130 00:30:04.693768 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495550-446x2"
Jan 30 00:30:04 crc kubenswrapper[5113]: I0130 00:30:04.829450 5113 reconciler_common.go:162] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fd4k\" (UniqueName: \"kubernetes.io/projected/3e10bada-94e4-40c9-bacb-39cea7378d43-kube-api-access-8fd4k\") pod \"3e10bada-94e4-40c9-bacb-39cea7378d43\" (UID: \"3e10bada-94e4-40c9-bacb-39cea7378d43\") "
Jan 30 00:30:04 crc kubenswrapper[5113]: I0130 00:30:04.838376 5113 operation_generator.go:781] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e10bada-94e4-40c9-bacb-39cea7378d43-kube-api-access-8fd4k" (OuterVolumeSpecName: "kube-api-access-8fd4k") pod "3e10bada-94e4-40c9-bacb-39cea7378d43" (UID: "3e10bada-94e4-40c9-bacb-39cea7378d43"). InnerVolumeSpecName "kube-api-access-8fd4k". PluginName "kubernetes.io/projected", VolumeGIDValue ""
Jan 30 00:30:04 crc kubenswrapper[5113]: I0130 00:30:04.932293 5113 reconciler_common.go:299] "Volume detached for volume \"kube-api-access-8fd4k\" (UniqueName: \"kubernetes.io/projected/3e10bada-94e4-40c9-bacb-39cea7378d43-kube-api-access-8fd4k\") on node \"crc\" DevicePath \"\""
Jan 30 00:30:05 crc kubenswrapper[5113]: I0130 00:30:05.400405 5113 kubelet.go:2569] "SyncLoop (PLEG): event for pod" pod="openshift-infra/auto-csr-approver-29495550-446x2" event={"ID":"3e10bada-94e4-40c9-bacb-39cea7378d43","Type":"ContainerDied","Data":"f94e53a6a9e328335312cc06a7487908bb2b6f84d198f3298c5bf4c8b3611e23"}
Jan 30 00:30:05 crc kubenswrapper[5113]: I0130 00:30:05.400476 5113 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f94e53a6a9e328335312cc06a7487908bb2b6f84d198f3298c5bf4c8b3611e23"
Jan 30 00:30:05 crc kubenswrapper[5113]: I0130 00:30:05.400438 5113 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-infra/auto-csr-approver-29495550-446x2"
Jan 30 00:30:05 crc kubenswrapper[5113]: I0130 00:30:05.786136 5113 kubelet.go:2553] "SyncLoop DELETE" source="api" pods=["openshift-infra/auto-csr-approver-29495544-mx26v"]
Jan 30 00:30:05 crc kubenswrapper[5113]: I0130 00:30:05.789346 5113 kubelet.go:2547] "SyncLoop REMOVE" source="api" pods=["openshift-infra/auto-csr-approver-29495544-mx26v"]
Jan 30 00:30:06 crc kubenswrapper[5113]: I0130 00:30:06.782293 5113 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61b8e575-6023-481f-88d1-4ead3fb76864" path="/var/lib/kubelet/pods/61b8e575-6023-481f-88d1-4ead3fb76864/volumes"
Jan 30 00:30:07 crc kubenswrapper[5113]: E0130 00:30:07.775588 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:30:19 crc kubenswrapper[5113]: E0130 00:30:19.775849 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:30:32 crc kubenswrapper[5113]: I0130 00:30:32.777178 5113 provider.go:93] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 30 00:30:32 crc kubenswrapper[5113]: E0130 00:30:32.778954 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"
Jan 30 00:30:42 crc kubenswrapper[5113]: I0130 00:30:42.454978 5113 scope.go:117] "RemoveContainer" containerID="527a3c407c72c80d5fc5511e63985368838a916d571fb649fd2aeaa8aaf2e565"
Jan 30 00:30:47 crc kubenswrapper[5113]: E0130 00:30:47.775132 5113 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pull\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb\\\": ErrImagePull: unable to pull image or OCI artifact: pull image err: initializing source docker://registry.connect.redhat.com/elastic/eck@sha256:815e6949d8b96d832660e6ed715f8fbf080b230f1bccfc3e0f38781585b14eeb: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving; artifact err: get manifest: build image source: pinging container registry registry.connect.redhat.com: Get \\\"https://registry.connect.redhat.com/v2/\\\": dial tcp: lookup registry.connect.redhat.com on 199.204.47.54:53: server misbehaving\"" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5ejfk46" podUID="37e551ce-ff20-486b-986a-429cb060e341"